dd5983426bebdc9a7890d5db21db45977d375d3c70f796b3657be646da9f5261c0e1552857f592dc26c0a39489e599bc64b217447ff52fe3654fc213d3272c5a3b6e5b98eaaf2de717fdc6191a7fe1d379ce8491598a3e233d457e82174925f1b0fb3b3d9029f85b7a6a4d68f63daca582d9329f33fa558531f756d0fabed5da64a7aa2fa593b29693bddab21605b4563770a48793349c3d27deffad6e327748776f8af4054ce945fdb7770e693086d5bbf6771feabefeee86a85aa36ddae621b3a8ab4a0db525308ec774edba7f6fdde7f5fefa3feb7efb5472856ab46e1fac3f13bec4d19117cfe1f997df7ee8cf60f6262b4adfcb30720d7d13cb635f1d89b18aa8206d02d214c7d0e2c42ffecc6adbee323858e7f9ff3c8114e2f31c4925f9f684cef102a783dcc305e4594cc183a5f61322607cfe341f705bffb52967588fe6ea10eb502465641b74598470eaa1d0c3eabc62a7ae70a6950f48449e88cc94bb5fcad57b14195ddf0921e0d2fe5232ef95b969b8bd95f80b25ebc07aa4f7debaca6601a9ffa74490177e1f5a5d554e51740b5ab27ceb5dc4791d3e942e7ddea5a87b679d7aff9432f558d75d3199ce26fb65573f3cf5bb88e48645d2158a89d3501afd99b9ee289fc93be48beb20391717ddaae064a36ac49f5ba9b5721292e4459899688912065aac776ea38adc9c7e30b0bfa66f523b7a4c502e6b9fa1f93ca9a103b488534e83124b05be4007e7e615a056a62a593f65bce54e5f3dee9cab355b770a4a94fb2c774130e547cdc444406be4d3c415ea8610315f00fa84e3832dabeed31cd2a07e0e0230ff7a1d833e1c00e4d2d2658cc0519ab4ca90b80b640646983f4217117c7031138a360603e1e7c0c27c2e89954415e3c14334bf338817edbf0c84d4669b9fafc15a3709a9e5ff4a5b319d54e716a3eaa53ba488b3a9fa6eaa789984d913dd939d2067a84c634701f595b0a84512c51ca1fd5ee284f9648b9f5f8ab76008f1eab3e2779853bc628405532efbe57f70b3faca665fc7d336cd615d86f8613586b5616e0ba993477bd8dd3595b0f105f80aa9d5ec024c177245fd98fc5d19a288a590b5414b98604c845ff2930f0ba9267015ebeff3b0933023557be6b910a4436754ae3d25d49ba56546f5f59f1780873baf508186d1731911fcfc93b61f72659f2a2e61e0bb0ecdfd671f451a5fc43961614e793ba82b5e7903c6f79a466f84719981357c964d9c5f2e923e7e98266f634d6ca0200c88b0d8d369fffb4cab287f3082d6eaa546ba886abd40a444b28b54c928a2b6f093f6da9a7f96f1706b60386b9fed854cae565d0648e93019e86c122225fd0c539c3e2c7d19bf0e775e34b5ef905f81d21ab2ce444cf582b285e4b26568da46432e12743df4fa738bfcfd800e481fa4a13bee917fee38d4bcda52c5498febea5f3bb2d366482529f3e65edf67eb6fba29478cc9752a9ea7e5fe28ce87a63fcb2ee87b4a2e19d0cbaec4d51afc010c191ac870db03ca2f418dbffffd2cc6c0c01a2a343ecdd0080bf9b3f7f5df3a3d1df51223c8c86c5edf206bc283fab9c06b1d54bfc1a1ef72939d720f3f2fa1566121bdcbb52f2e10270ff344d2d0c77fd227dba913596927299b0f779048853a64744cf4a5aca78fda716cec87802b07ca6dd2f2a95b779fe11252b4bc54433635f51c94ebbd6cd513c1d1f62502fc57f8e942e4fe8c58600abc9a198fac583bf0ee3b6cf0989b9344429d41f5f9245f8319fa0861dad9115c1e5225a61c9c72843c42687db5cb59e18a3fe792bbb2d0d35eabaa890a097311f89a7deff5b128284d964db87916f07a4cb90300ccb6dde4b38dc31c290fe20eb0d7e8eca10f3e9387916c809d4d05c63b1f88dfd55250c3fc5195e2288ac280870465515b3870541785f301646390f901e1cea1cbcb3c793567581161b4c97871b90153493c0568db59d90fc444fe8e3cc3f2251b1fa5070451d1212105e730db436aa9710dfd547dcd40f0095f083eba224b16e0ca0c1be78d6ef1a2405d9f4cd6fc06b5e20bb2d93bc069b8a081efa54bb4160acd8eee514395a8a2b4f72b8356538fa63868ee3685920380bac708211bfd66fc5c883889af14305a8896efef7b1c7278c00ef4d9c472632f2b7d660e99b6fedebf536328231cf2ed4cbcda4cfa784332a9481d71b4f8b0d1f3499d0a64aca015e2bd7bc47bc2bac2382577d8716ee2912c48a00f765c17591e82a484959a4e558b5581b1aa159a7597b091b3389f8727f507020feae305c0616f8f6ad9d9aee13712cf9fda696959999d84ef50b1268d3b7565b0c220cd4bf06caf76aa7799097907344156a407199500ad4311deee361728bfcf753ce4fb404764e0cb40877041de077ea1fc1ec0115af1e2b2181a511de8012e0b56375df11f8e0b1f6ae6c0765d2907890d72493b342f61b47ad3c7513ea09c2d08b4d6c2ebf71ed8292457161a80c9efa8fb7ca7e4621798bc35fc26c10c857
e44c0d7ea63084cdc029c9d809a7cd7dc6ddddabf0c8fb8c3560b3214e5488fcd1eca5fc2608eac431f7b3f8a92316056a3850941fe062713e4590841e3abb7216c5b56658c78151a776806e0bf52aa07d826e07a6a90f6ba4ba6eac81f81c41a17cb55cf894ad19adb5b39f8c865adaa73fd1f33735819d8c9cf50096f5065dd82e2805a03da4de2518ded5b1038e383de49cd2e0f1d875f972898381c8ec5bdd731442f5b4bbcdbe1b44086eb25015557557fbdd0de79562c9bbff095f59d71b35082610a5ce3a153e3e59d972d68862cb7ec177991371c72a422c5df2882e098c1b747b960e353e995e36c6832f27d5e29babdf3b74a79719a32447a885e1adfb25a74d6ae8452bfad0ff832a889f47fe7113589bfef48d2b88ea94363a8a836267e6c9eb1db6cd950d8c0f3469c1565b18f56ede3d4ffd2c5b3a878d8b7026a243fc04a2f12148ad0a5807a7d4adb61c6e3ed457bf3219cf03815e589171c6b07426dd0692abdb91a663e3b37b4e11d05181299d63549ee0e0472163a1aa65b45f058d07dccc688dfb20d8523dc2898451ab5c1dc5bbcc4f2aa260b2faf3fab37685fc293b3119deca3edc9ce28eb584cc904b11e745c5319a95798fb0ff46b27c6ef3d3ba843050dd2d2c025cff6db1cea1fe5f9bd94c7e11f6af357ef994368b848b6e03baea57c2d2a0a207f26e7a256c7e50c82b2421fbd1c2c27b82f62497a0212e4828af0f1dc104be93a4a39afc3a576c950ac6d10bae80950b3d02bcde820b384df4a3fa33edbc25aeca61a7f61b28c00764640a0b217619f7394da3c746b5e0ccbfe2429ebce3932b8600de6dc243251c7de5a7707669d0ffb9fac49a146f5924805ce2a70c282bd5584fa45b65eed893c80c8fb3f4b86baf4c3bb7cd788a1461944c51d8896a38690be2b6497fe5ef791a11f8121a5b94c554883eeaa972de6e6c314dbe35978b52787668ba1c2e0c94dfec878b4c37f8dd840b6be1c425f92b94fb79d1261bd36a573f17a6b3a474714720255174233360342d96b66fdea23dc1f22a54c3f93e6ebcde31096b7664e3fc2c74db439b8dfee46583bd1b203a1d8746682a6a8413d8d0f84e2075ad08c4aaf26b0f27205d95aedaabc00de025a6ab48052be521561e6d09807e551b1ea9141fe52791df4553b7567d1c72b236f545fec240d1a5a641cd1ce94ea7812a8764d98566e5656875d8822586bb2c2c6a4877b5a7f3a1f16f46b86bd7f2b06e4db1abe4cd3c4263b559bbb68a25510b8f605d191bc229212d06a753ac3b57c7635ec0a88c5f17f6a98fae26b24d21d710c1c81c511ac12f427c70f77dbf003210873b35ce1c8e29fc1928eb8e9823c1b7707192ea6e00b247ef545ced0b62884ec974dbc1f28abd7ed00c9e1d33bb8041d9d51c4629c760f6e9b87f41138ca45e0a111dcdb575f331686f54b60694b295fcdaf7b3b8df7e1bb3693214092d0780e3b8e0fea3f935131f8274bc1348688df4fbcf983b2a7c5d035fa94cbe6d7cfff32a59fbda1a9f4fb3a494050ac41fd9946ce1b8c733e737c2327ce99ac27a621967f9940394202937f1b48970d6ca533673f7476aa3f80c18737e60dd27c136311983642789ac2f710397eee32bfd63f0abdfd18a61f12e1701b2d2b3e55870f36b683b91dc205b7aa96f99621b238745a05e646ff3427da96e1ce8f6733e54f6683893baeac0e83c410b0ced1824b26c6ae341f74864824bbe83507d70b689c3ff2486a3dab0ff82a621d5771fc31d9b1b4d652c2bf3c86086561e0322359a53d2e736a1bd8cb47e28a328b4f1fb108f57cf536440c86ba7d12a71e512299281846543eab0bd02261f4bf73902589c7b5c89071a77638c523728286088bab0ad194c44bb6e8bb08a9915182b61aa3ef11e7e1cf418895b2ee5adff7c6ea71b05084824535432cc9d45ef980ad5e001a882916318166df12185122937799a7545a1821e67c838a6e7c3338c4103cd6d7f364468867da71a38f181ed0a01d41282d6d270b1cfb5a885941d2484e9da9d6fb6ab39e0381ded7863f8161b8ca38760cb0643c9d7691bffcff4d71522dd86cbac102c34ab5907a1a818febc5c65d65e51381f845afebd3d0249a8a1890017e7dd1ae7b0c508b6a7d5525d2c3f8321dd43a7ad7c23d483778739a33fda33f7636f2301f7d100092576a66229e701834cc426ece28d3de1939360c294a6ca9903146fd4449df8ac56830c5759c1512211571d7c91d7f53499f1545ce4e0c99385c960750b1b6474690adb28a5cfbfb54f0feb3492e74af1cb829cb3942e04a4f6fdbb0004d170e535d059f8c51255c3ba8981355d4d2a8c7396412572a0068e701539be49e4253b06900ff2f9bd02c197d831", 0x1000}, {&(0x7f0000001680)="cd5abeccf78bb129da687e638e9aa4980777db027eb8e6ff4bf405684aafce016154df2ded", 0x25}, 
{&(0x7f00000016c0)="e0d9fb571271e7c2e4818e7e2fd1fed7590fa04e1137b5e1dbca0a3896fb29b9bf55813d22c381758916beefd5fc8dfafaf3824f19acc3d0218260166fb57950c6c9c23ca817a1bfa07df1936f2f177e2e29609b02ddecb8ba3c4a3082937b4d996eee30061d35460f899f17e3dd334b308d98289a12aff49c7a53785984761ddeec3d5ebd49a785a056baef0f577e1a7a1c656d00023397d0e39d0d32901aa1e04f0d1e6250755a53f2c2f5f381c6bdc085285c40a5b1c2d4970581c893fb1eaed11f43909badc1da7cc39cbfa1ad2c50b4a8bc59f52cd494b0d51ca93b53a4fcf4df16f5", 0xe5}], 0x3}}, {{&(0x7f0000001800)={0x2, 0x4e20, @dev={0xac, 0x14, 0x14, 0x18}}, 0x10, &(0x7f0000001cc0)=[{&(0x7f0000001840)="9a898dc601f47696cc62c63bf283daf15725e2875b257b7e767a3a2068cc45c20b71d4cc44d845e5b797b55dfb14bc8b25a0fdfecb412c54add392361e0eabdc1459e668976aa92e89f9e4e0be99e6592d8a421cd82694c98072ef0adba04ce56a8552f405fad670c7378945bf5be5632e3ef0e023f7bf27508a7833044d8ce8bf85673362176bb3104bd7b7aa434fa5d585c3fa3fbc2fe50b396c7c0817ddf3934f31b211de8b896ef7d333a1f55943705b771b252d14330b44ddb23ce78522b888289056c49a973185", 0xca}, {&(0x7f0000001940)="2b1a12d822554c41bf293dc02172a3f5746d18f8a48277c0d67e083264109149febab0454e0a135a4b03cb4a870d83fd4b7edb1f61a1fdfd3bdd2cc78db45ebc9a7516c3609be38abb7df7cc2ab0f0c766191b17c9c6e815cb3ab53cbf5e5f5b6f5c54b4052fd78ddcb177053abc82", 0x6f}, {&(0x7f00000019c0)="b316d6cad8674aa5baee516b9aaef9102a94fa59325c9cd3f4dd57fb45359dc7d4c21a7022f6d37ea73b6734d38c6a1e7cb7685cab04a548e7a77d186410c9937b5f58f7af36a535e2ead100014ac33d61e54d7f6227e567845fd41bf83a0b5abe5b4fd68ac301f9d7fcacf90df9faaa4d22aacd60db71798e665bb08d70eb26dbd7eba9108a135d1b51408bed3cdcfe6900ebd3581eefed35aae079e023c2a373964f3f34387e1ee68667a612f9af647d6f77befd9326437e0a66fce174e60ced405ff0eb63676a661bbfe16075c041725de61e628326981ed2785088f5c11b", 0xe0}, {&(0x7f0000001ac0)="7f4ab5a9ff0d9379eb8a6d8b02d9bdc4f6169dbd8794aa6994f310318b068f08aa2bcc6df2a69c3e463d7ef726ad27102f0fb8d8112e9cdd27d03e6bc4abb322c15a26d3339f57a492edc80f8f1ef9a5a1cb477152f42275879ca0d12c2aed8c35a084498bff7e5cda1478cc55b91c5c57c3f162fae3b57c081e10f59f51bbcbb517ccd113f07e43b4d992216487499d6baeb058cb7949ebb7d4ad46e015d2749fd25bb2e241c6f0a06fe8554e551ac7cbcd084a3aada5ca73903bc8c5bd", 0xbe}, {&(0x7f0000001b80)="367089a2fd8081982423736879f95a497ff068632920f3e3af2ad7a2f7cca1", 0x1f}, {&(0x7f0000001bc0)="1fb0b276de9a9dd6ce297c49b87b1518779b75abf9721e48fae3f9fb857675261dbdc9a31522bfb310d3f35fff0ac009dee6bf2f051ce1ffdba3fb7d403cb7ec1bc6baf8bbfbddf1cbe2d0549a1c0476cf393d2eaac14488fdd3460ec80d2d4b9505d8bc503ce36ab482870b4e054b16a3cbffca07945a53e22ba6e8bf8eb69782278eb480d5e30bfc26e12f1d64c27d35c0903a3adf92ce59806c8fe734be0d961ce8342117b618201f344c14fd12b49fc5f1487989ea7759f527a2babf61812e0006e17dbe433eccc3987e2f20a378a39989bfde55b0397cd1ad", 0xdb}], 0x6, &(0x7f0000002d80)=[@ip_tos_u8={{0x11, 0x0, 0x1, 0x1}}], 0x18}}, {{0x0, 0x0, &(0x7f0000002ec0)=[{&(0x7f0000002dc0)="0b6eca461bd70e01d19b7ad22f7e32c3c8cfe40d80ea3f5fd61900dda9d525c5a29a02054155ae160d266b0a88fca90067f2c805287592edb537e7affa4663e4e5cfde9464135268aa1cf5468257846a429f33433e28c56d27d5181691635812723b3dd0de2f99d020f59c6fc3feb8232f45857f6be13655fd0f0b3605ca6f312c2d2a9153e3b3d7510cd932a585f12c468b98bbf515b15e79b69202e556bbe96bdfd62b741f16d2f848", 0xaa}, {&(0x7f0000002e80)="9702b6e9d94830306fdb5dce81785e8af507a77cbf", 0x15}], 0x2}}], 0x9, 0x44844) r6 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r6, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) connect$pppl2tp(0xffffffffffffffff, &(0x7f0000000000)=@pppol2tpin6={0x18, 0x1, {0x0, 
0xffffffffffffffff, 0x4, 0x1, 0x1, 0x1, {0xa, 0x4e21, 0x420c1ba8, @remote, 0x1ff}}}, 0x32) (async) socket$nl_route(0x10, 0x3, 0x0) (async) socket(0x1, 0x803, 0x0) (async) getsockname$packet(r1, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) (async) sendmsg$nl_route(r0, &(0x7f0000000280)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000180)=@newlink={0x3c, 0x10, 0x403, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @vlan={{0x9}, {0x4}}}, @IFLA_MASTER={0x8, 0x4, r2}]}, 0x3c}}, 0x0) (async) socket$nl_route(0x10, 0x3, 0x0) (async) socket(0x1, 0x803, 0x0) (async) getsockname$packet(r4, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) (async) sendmsg$nl_route(r3, &(0x7f0000000280)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000180)=@newlink={0x3c, 0x10, 0x403, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @vlan={{0x9}, {0x4}}}, @IFLA_MASTER={0x8, 0x4, r5}]}, 0x3c}}, 0x0) (async) sendmmsg$inet(0xffffffffffffffff, &(0x7f0000002f00)=[{{&(0x7f0000000080)={0x2, 0x4e22, @private=0xa010102}, 0x10, &(0x7f0000000400)=[{&(0x7f00000000c0)="d8ec7d9fa424195f9c3a64e4bafaeb7e6b2330c8f5eaa3659da77916c696d802fa6635499c23610822ca1704321816975d9f6a7dbd4b2aed1da06582ee349df40e18235f1f400c1432e43889560fdd62ef18091439f03ac13020ca7338de1f1bd491b27ddb80133b725c7e66ad23e7c6a83185754837e5fb4127c0f64315cedc77fb84ad529a2fc2ad42adbe900c58505f50b43b6e7c64b0c46cc5123491d7aa80b9fb7dfea2028e1a7d8ba9666f9ecee2d82770512a0c3dda5b130c4b9eac890186f5e8b4fca3e5fa99aa93986e56f3c36664bd372cda04e450f7414134689709", 0xe1}, {&(0x7f00000001c0)="168a8f6e4fbedce58dfe5a22a94590ad3af8885bfeda908668194dcb13e4eb0d635a3cb72f9a4679d8d6e18dc3d96aa98e93c0a47b4b219ef688311f505615d9434375028c38eb81a71759bc510eec02fc115e3a6e", 0x55}, {&(0x7f0000000240)="5a16536a961e7cb033d1240c04a125a66da07d3ff6043e8e4f030d015436bacdc65f886164afdbb2152e3bcc0dc3", 0x2e}, {&(0x7f0000000280)="ceac996a52bad385e6c04aef76525f0ef3507383165d7aeb41f3bc409e", 0x1d}, {&(0x7f00000002c0)="2f83708fd6523d", 0x7}, {&(0x7f0000000300)="60f9ac0bb60fe922361403126d204f3f786a246a2bf8b6c07e087d4ff4e7343c1d13dc0a5db99426dd003c84522da622a0a6487c848562a3955e22fa3cc5cae984358fd6de668740c47a994904b1c1df6de7623a9457a6c6ba0b676c0bef6fb15b59de8070ad586cc0d2e29d0ff5bba124b1b289e0d687df0d1ee9d4c543a56718adca44c37a7520de17b9c7115417ad20ea75463939a175b8404ded5872fcd08c3e516a95fd3094ac9915d018073847cc4d1aeabf29933ecd8be6f264f1f64a3a4924a5d073e981552c74a4e2b26863aa3bb81a74c8b1def4849578d19cb9", 0xdf}], 0x6, &(0x7f0000000480)=[@ip_tos_int={{0x14, 0x0, 0x1, 0x8}}, @ip_retopts={{0x30, 0x0, 0x7, {[@rr={0x7, 0x7, 0xd, [@loopback]}, @ra={0x94, 0x4, 0x1}, @cipso={0x86, 0x13, 0x0, [{0x2, 0xd, "a4249189b335727d168d12"}]}, @end]}}}, @ip_ttl={{0x14, 0x0, 0x2, 0x7}}, @ip_tos_u8={{0x11, 0x0, 0x1, 0x3}}, @ip_retopts={{0x78, 0x0, 0x7, {[@cipso={0x86, 0x16, 0x2, [{0x2, 0x10, "fb2b75fa70af7a9142b880a15bcf"}]}, @ra={0x94, 0x4, 0x1}, @cipso={0x86, 0x2b, 0x0, [{0x6, 0xa, "c6e24e66ebbfd79f"}, {0x7, 0xa, "a4d9ad756f262e50"}, {0x6, 0x8, "fdcbd9e487e7"}, {0x2, 0x5, "ee3901"}, {0x1, 0x4, 'E|'}]}, @rr={0x7, 0x17, 0x2a, [@multicast1, @initdev={0xac, 0x1e, 0x1, 0x0}, @multicast1, @multicast2, @dev={0xac, 0x14, 0x14, 0x1f}]}, @end, @lsrr={0x83, 0xb, 0xed, [@initdev={0xac, 0x1e, 0x1, 0x0}, @rand_addr=0x64010102]}]}}}, @ip_tos_int={{0x14, 0x0, 0x1, 0x2}}], 0x108}}, {{&(0x7f00000005c0)={0x2, 0x4e21, @multicast1}, 0x10, 
&(0x7f0000000780)=[{&(0x7f0000000600)="20115f08813118b222b5eca2986855205cd28336d49ccb8f21cc8e9fc7ebea2333b2824c8ecf12e41a96c74184768b17e148ff00fcc14e4eb1c22ad02d7999271e33ed5db470b6b03ce7c98e801135455f25cdf051abd115dfe66c009d0e7ccfe30208c746ef1cc4cef1e321d9c5b6558644d1995e4f0df5eca3279bf506c08edbc843d86d25e6ac59c4c3fbcc9fe90f10a7e341417c0629268a1e4c7f955ca1a0c4fc494537221870048e519653ab6bc1f28836c5647396e37d8e", 0xbb}, {&(0x7f00000006c0)="1e9b03f4ab770e2f345b44e67d2f9273935cc4fecb8be550ff1825208e39b8c6b1b545f92fbe77dab8847e3b873b37fb26465b45195418f2c7338efdf6a9714a2393f7553d9690b271b2107c38f2e1b8e59efa717cf406b1c6c44c378604aad1b0216e76bc425f68db981a07bff390be36256c575752c0775068118f3aaaf3ecda36529a6274c8ada043d2463afb6de60d729ba40d081cab415db0dc83208566ff185a669f526876367f27cd7a86c6b43d8d2c8923385ceb13ae4d8126569eef", 0xc0}], 0x2, &(0x7f00000007c0)=[@ip_tos_int={{0x14, 0x0, 0x1, 0x7}}, @ip_pktinfo={{0x1c, 0x0, 0x8, {r2, @local, @broadcast}}}, @ip_tos_u8={{0x11, 0x0, 0x1, 0xfb}}, @ip_pktinfo={{0x1c, 0x0, 0x8, {0x0, @dev={0xac, 0x14, 0x14, 0x43}, @multicast1}}}, @ip_ttl={{0x14, 0x0, 0x2, 0x9}}, @ip_tos_u8={{0x11, 0x0, 0x1, 0xf8}}, @ip_tos_int={{0x14, 0x0, 0x1, 0x20}}], 0xb8}}, {{0x0, 0x0, &(0x7f0000000980)=[{&(0x7f0000000880)="0d5e5353604f9ee83da8cae2c35ac231465cbfcce8b09033b0ba696423d2d401332d756731ab99d1bab1bbcb9e4652060e0b935b0770f518841bbbe671c8ead596d4d8cf665e85f466cbd5c847cc686b6c77d0466407bbe23e5ac340899902a2606357df396663a81d4644ca79f64286fe9cf772a257e28857c01eddc37ee99d85d84c5b739cd082cef688bf0258f8210a84a18daa6c8649600e322fc52eaf16bfc4102c2e027591bf8962b7aea27ea2c9376c9716901302b94d34c62d08c68c603481e43bf85354e7bfebaddc1edda220ee70", 0xd3}], 0x1, &(0x7f00000009c0)=[@ip_tos_u8={{0x11, 0x0, 0x1, 0x95}}], 0x18}}, {{&(0x7f0000000a00)={0x2, 0x4e23, @empty}, 0x10, &(0x7f0000000c00)=[{&(0x7f0000000a40)="ff312cbe2acd447fa1", 0x9}, {&(0x7f0000000a80)="90550e63f1968150709fce1196a06a85985b9f14ad476948c6a19b762992e9b0e9f64e3cca9efa1184437159cbec69c4c6e9e7d82dacf105477e092b1113cd0d8253a7f671a7b7fbcb2bbfca9d116299df6a686ee8ba3da33afa8c826afea75405cd861d383a3bcf49fac5b96e23c559c3062d13223dbe85993ae4a527d953beb478f62d468cc67d4c96909696b8e01403d20b5dd8b872c1d23dbd0ddbe4c7d0c8b81ed79ebadd33761c07acda446bc4f6818f55d469137fd519a055c4f87608bb6a3400ddf21bec95cb4094d5afe742b9877b187f", 0xd5}, {&(0x7f0000000b80)="cacdd15be640ccd6421057b3229585a34a0ebbd61744b6e959528dd9d11bc4a78e38d4baa80937c46f1af2882a70e457dae8657d90d477aaf4c4a26e3659563d59abb90b4aeda990481403222eb0", 0x4e}], 0x3, &(0x7f0000000c40)=[@ip_tos_int={{0x14}}, @ip_retopts={{0x58, 0x0, 0x7, {[@generic={0x82, 0x11, "08895e5ed0db3aa0a197f8af0ec372"}, @timestamp_prespec={0x44, 0x4, 0xea, 0x3, 0xd}, @timestamp={0x44, 0x10, 0x63, 0x0, 0x7, [0x80, 0x70000000, 0x10001]}, @generic={0x89, 0x9, "a1d47fc3272900"}, @timestamp={0x44, 0x10, 0xdb, 0x0, 0x1, [0x81, 0x7f, 0x2]}, @timestamp={0x44, 0x8, 0x74, 0x0, 0x6, [0x3]}]}}}, @ip_pktinfo={{0x1c, 0x0, 0x8, {r5, @loopback, @initdev={0xac, 0x1e, 0x0, 0x0}}}}], 0x90}}, {{&(0x7f0000000d00)={0x2, 0x4e24, @loopback}, 0x10, &(0x7f0000001100)=[{&(0x7f0000000d40)="155b0e293cdf53a13b92793b576ad536a1e87d3966ad7f123e8b6559ecf771c91d1aa1b1f681d0dd1b9b519d501fa654eb251916edb1bc2cd59a947a5126b90f1a8b7904b2ce2594ba33241ebe7f6d4d404926b43d18c2711c1cc41371edceec88a1ceb4d26101b4075e6c859620027f0c5ebed6b42aa367b18cbcdff4148bf4681559671b1309f5313226bee337b6e5c12b5cdc7d62d177b5699850e78d31e4a3be427e30dbbf3444a119a5ead25e7f6e0f4dc4f5e36a6c949be1a370eee7aef4c010af65888ed89f9694ad819bcf72c3067c00dcf0f8d1", 0xd8}, 
{&(0x7f0000000e40)="a41204ebf22a77d7fd6749a917b13847b269240dbadef590486cd1dc55341dfa8fdac96337ef98eb32792649cea4b8261d6905a91b67cf4af258e670", 0x3c}, {&(0x7f0000000e80)="52c842290eef259d1d9abaf29efd7791f5ffe8a118a5c9127c8ecec98cd512567717b7e3735d2453d939146977c5eef5a0a0e79d3c700e78691a8be1a53b6ccef44036a9ccb3573cdc6d4a449d7aec4b881dd09ddcb1ef59d38057ffedf9034108713f4fec00a71a4fd51bb34b56906ae5389aff", 0x74}, {&(0x7f0000000f00)="3470a5466304318982f8f58a55f18663290a241d4faa4df6865c2820a47183ebb0695271d8f502b502dbd74ee52338426c4d8b40e47881c79aee9089b4f76024f48653e5fcd68e729c10b2c0a79aa4ffa9b1f990a274c7e3ee3233f43a62b4950fc52edc0ca963eaf99513afd8aa3edaecce197794dc5bb0f02f05350e27b5d7c80e89712ac5fef005f2e7dbc734ece0703f52a69bf4c648533d8f5223879a043214d83b0b7058d567e94dc6acce26049677a0883ed8edf933584956d00415c412af6c2c7f44ef6dc566ea23be7651cb1ca2a5fade82e0cde50a4caa0bdf6e9bd476431aca56dc0551a474154167facb", 0xf0}, {&(0x7f0000001000)="0128dd099555df5990e6a3d826c1313c87d7df20f125e4365e540f1510693923736bf8be64ada1b775f7a332cf89de483ddfc1c76abb99ce15ee91f924b9643bb4738dfc60218871f750642f96004202b5e34c290d837ae3a514a85f757e5ba167eef208c1985af4e9768d5ccf5c852f5b621a29317b3e1ac1261e0672d88ef5099c40ed04039e7fe60e57184e28b1c4fde20070f791949ff31dbc1b75e87874120002659585b27382ab3af7f35d701b429e6c689b85f74f933e15b212db2f7b21e42321901d1a5371d4", 0xca}], 0x5, &(0x7f0000001180)=[@ip_ttl={{0x14, 0x0, 0x2, 0xca0}}, @ip_retopts={{0x68, 0x0, 0x7, {[@lsrr={0x83, 0xb, 0xea, [@remote, @broadcast]}, @ssrr={0x89, 0x7, 0x79, [@empty]}, @timestamp_prespec={0x44, 0x44, 0x33, 0x3, 0xa, [{@loopback, 0x5}, {@loopback, 0x9ac4}, {@empty, 0x6}, {@rand_addr=0x64010100, 0x4}, {@broadcast, 0x8}, {@rand_addr=0x64010102, 0x6}, {@empty, 0x8}, {@multicast2, 0x170000}]}]}}}, @ip_pktinfo={{0x1c, 0x0, 0x8, {0x0, @empty, @initdev={0xac, 0x1e, 0x0, 0x0}}}}, @ip_pktinfo={{0x1c, 0x0, 0x8, {0x0, @multicast1, @multicast1}}}, @ip_ttl={{0x14, 0x0, 0x2, 0x4}}], 0xd8}}, {{&(0x7f0000001280)={0x2, 0x4e21, @remote}, 0x10, &(0x7f00000015c0)=[{&(0x7f00000012c0)="9894e623a1056c207b8ab98ee54adafdf085da81a37f06af11af8ea35ebe32a909b6dc2a00cefe9ef6f0e0dbec64094699849cb3f1a87563c70be6221d53ea7de110fc3b5d71e98e7401a537731b1d4f39a0a55601beb7664f56cdb8ec89f6f3abc1d7090c87cb480e1a2531ca2b1b134099eaae57774ec144fe3af8f841396fe0bbbc3ab65257733eee9b7bbe9623b9bb4e6068b59580329892", 0x9a}, {&(0x7f0000001380)='o', 0x1}, {&(0x7f00000013c0)="372092a93ca6119d3657481739e554ecc784e9a8166d7cf723b856c367401a08dbdb6b27c4eaee0a937c7be88b4aed903a301f7fa386f9744a97a598e340b047cee5ef12f3e2b760ffb767eeacd3ec6b115dc33be519ab02e61ec224ae64bc77cd7b235d3596222c0179291b68f2b5ab02cd09970dfd2a85292c7103dded0dcc7a6322be8240cd19cc495cb94c502ccec31b18df745bfbff187851d33dc939dd9ac69f69d5f49f714b85c9c832fb4af3606b9da34c63f14f0d5c9a20eaa8791f9526d4b263d8e098ca94e77a12a8fc668450cd13a8c8b94d11b8a517d8823462694baeab3978bfb8d692daec16fe55e4c833314db655bd08c3a10efe69d11a", 0xff}, {&(0x7f00000014c0)="7cab5402e8cd1d8ae1433f85af65db679bb38e60c7a2ba50aabaec9f927081a7685b0daa7b87ebe6ec687d6f33c8b92003fe9c12d3979f764b6bb565290ea7492aabeaf2c64e94f9e31b51de46bc003d1625cbbbb0255c50447d1b9304f9b4313557cae3090b0f52b4fe28f22ce877c8c0c85cd26438bdc745fdb62d0f432587920eb0b462b7de1704d24688e87c78718cae99ddeee00ba5b4c9abecd17fc7fed85380e1ec3a69ab4112b78ed683c37024556b10d03463089aad68cd94de", 0xbe}, {&(0x7f0000001580)="a30f6cde7c099b41e4fbe798fe", 0xd}], 0x5}}, {{&(0x7f0000001640)={0x2, 0x4e23, @multicast1}, 0x10, 
&(0x7f00000017c0)=[{&(0x7f0000001d80)="b242ae519b1247bfaa2deda50a201e1cb8fe1c3c57219d40cb9ef9b02dd694e065e10491a23645cabfaefa14291cb90abb516fbf3a62bb523f9ad6766b393231713de473237fd66ee1c8eb1bd6d600e02c5b30fedfa9cfef79c1e4df2da2ec50ee85f3d656561050c2321b34e28e6f21776f0ffec02b1d8b58e044fa5318a5de7edc92e652cbdd58e67da5677d637080cc7d9ed94cc446da2cc365161c3f43fb07c4e3ef85b34bddf8b439b0d048a3be4a7445073594aa29ac4e84afe712f6cfb66167847f91503ded977293609c9758e2dc8a3abd6c2be3d2582bcfef777406a3fee7ce59eb8d3cd2ad8c23039f1e7897ace41b1ba13af107a87a9e1ed108b313621bd44c94872cb6da822740f6d98b75cd5a8dc5f60e46d7237fc0e2d459fcb77f9e86124bc10e8b5068c1e229208841d17fa7dd052894d71c481bfed296d9a0e12622231342531dd3825089133598b5c20af81cec3823809a6d03f3580b6249770adb38586987c50c9bd0f52cf8d11822b7418dab1319146441199eb3a91b3a36ebba53c313eaa8e3c28839d9c80bb9e5784674b2927f1337275f541cf5e10273e5f9e7f63fcdc512b0ce938b720cb428c7e18c9887e37c6effa4653671b265939ff0f2285bf8bda5329f01c658b3d0f3778d4f3a4275993edbf0aff4a1910d1311e104a07636f3250d2af8e2a35d7d6cbfaf2b2c4fc5fa99f28ada31af629e1f3ef2cccf03f1cf1eac4ce4a7b9842b84eeca6e298367385b8e4bd22dc7595274a5ae0ea6e248af8264917771aaddc027a3f3f1231cf57193eb1529b0beff62c5de22b749e4a49e725d2c17e66a95d2b7aa7adb35fdd3798bc729fd46c6e8db6313a50a63abe2516d3f78241275c36166b461fdd5983426bebdc9a7890d5db21db45977d375d3c70f796b3657be646da9f5261c0e1552857f592dc26c0a39489e599bc64b217447ff52fe3654fc213d3272c5a3b6e5b98eaaf2de717fdc6191a7fe1d379ce8491598a3e233d457e82174925f1b0fb3b3d9029f85b7a6a4d68f63daca582d9329f33fa558531f756d0fabed5da64a7aa2fa593b29693bddab21605b4563770a48793349c3d27deffad6e327748776f8af4054ce945fdb7770e693086d5bbf6771feabefeee86a85aa36ddae621b3a8ab4a0db525308ec774edba7f6fdde7f5fefa3feb7efb5472856ab46e1fac3f13bec4d19117cfe1f997df7ee8cf60f6262b4adfcb30720d7d13cb635f1d89b18aa8206d02d214c7d0e2c42ffecc6adbee323858e7f9ff3c8114e2f31c4925f9f684cef102a783dcc305e4594cc183a5f61322607cfe341f705bffb52967588fe6ea10eb502465641b74598470eaa1d0c3eabc62a7ae70a6950f48449e88cc94bb5fcad57b14195ddf0921e0d2fe5232ef95b969b8bd95f80b25ebc07aa4f7debaca6601a9ffa74490177e1f5a5d554e51740b5ab27ceb5dc4791d3e942e7ddea5a87b679d7aff9432f558d75d3199ce26fb65573f3cf5bb88e48645d2158a89d3501afd99b9ee289fc93be48beb20391717ddaae064a36ac49f5ba9b5721292e4459899688912065aac776ea38adc9c7e30b0bfa66f523b7a4c502e6b9fa1f93ca9a103b488534e83124b05be4007e7e615a056a62a593f65bce54e5f3dee9cab355b770a4a94fb2c774130e547cdc444406be4d3c415ea8610315f00fa84e3832dabeed31cd2a07e0e0230ff7a1d833e1c00e4d2d2658cc0519ab4ca90b80b640646983f4217117c7031138a360603e1e7c0c27c2e89954415e3c14334bf338817edbf0c84d4669b9fafc15a3709a9e5ff4a5b319d54e716a3eaa53ba488b3a9fa6eaa789984d913dd939d2067a84c634701f595b0a84512c51ca1fd5ee284f9648b9f5f8ab76008f1eab3e2779853bc628405532efbe57f70b3faca665fc7d336cd615d86f8613586b5616e0ba993477bd8dd3595b0f105f80aa9d5ec024c177245fd98fc5d19a288a590b5414b98604c845ff2930f0ba9267015ebeff3b0933023557be6b910a4436754ae3d25d49ba56546f5f59f1780873baf508186d1731911fcfc93b61f72659f2a2e61e0bb0ecdfd671f451a5fc43961614e793ba82b5e7903c6f79a466f84719981357c964d9c5f2e923e7e98266f634d6ca0200c88b0d8d369fffb4cab287f3082d6eaa546ba886abd40a444b28b54c928a2b6f093f6da9a7f96f1706b60386b9fed854cae565d0648e93019e86c122225fd0c539c3e2c7d19bf0e775e34b5ef905f81d21ab2ce444cf582b285e4b26568da46432e12743df4fa738bfcfd800e481fa4a13bee917fee38d4bcda52c5498febea5f3bb2d366482529f3e65edf67eb6fba29478cc9752a9ea7e5fe28ce87a63fcb2ee87b4a2e19d0cbaec4d51afc010c191ac870db03ca2f418dbffffd2cc6c0c01a2a343ecdd0080bf9b3f7f5df3a3d1df51223c8c86c5edf206bc283fab9c06b1d54bfc1a1ef72939d7
20f3f2fa1566121bdcbb52f2e10270ff344d2d0c77fd227dba913596927299b0f779048853a64744cf4a5aca78fda716cec87802b07ca6dd2f2a95b779fe11252b4bc54433635f51c94ebbd6cd513c1d1f62502fc57f8e942e4fe8c58600abc9a198fac583bf0ee3b6cf0989b9344429d41f5f9245f8319fa0861dad9115c1e5225a61c9c72843c42687db5cb59e18a3fe792bbb2d0d35eabaa890a097311f89a7deff5b128284d964db87916f07a4cb90300ccb6dde4b38dc31c290fe20eb0d7e8eca10f3e9387916c809d4d05c63b1f88dfd55250c3fc5195e2288ac280870465515b3870541785f301646390f901e1cea1cbcb3c793567581161b4c97871b90153493c0568db59d90fc444fe8e3cc3f2251b1fa5070451d1212105e730db436aa9710dfd547dcd40f0095f083eba224b16e0ca0c1be78d6ef1a2405d9f4cd6fc06b5e20bb2d93bc069b8a081efa54bb4160acd8eee514395a8a2b4f72b8356538fa63868ee3685920380bac708211bfd66fc5c883889af14305a8896efef7b1c7278c00ef4d9c472632f2b7d660e99b6fedebf536328231cf2ed4cbcda4cfa784332a9481d71b4f8b0d1f3499d0a64aca015e2bd7bc47bc2bac2382577d8716ee2912c48a00f765c17591e82a484959a4e558b5581b1aa159a7597b091b3389f8727f507020feae305c0616f8f6ad9d9aee13712cf9fda696959999d84ef50b1268d3b7565b0c220cd4bf06caf76aa7799097907344156a407199500ad4311deee361728bfcf753ce4fb404764e0cb40877041de077ea1fc1ec0115af1e2b2181a511de8012e0b56375df11f8e0b1f6ae6c0765d2907890d72493b342f61b47ad3c7513ea09c2d08b4d6c2ebf71ed8292457161a80c9efa8fb7ca7e4621798bc35fc26c10c857e44c0d7ea63084cdc029c9d809a7cd7dc6ddddabf0c8fb8c3560b3214e5488fcd1eca5fc2608eac431f7b3f8a92316056a3850941fe062713e4590841e3abb7216c5b56658c78151a776806e0bf52aa07d826e07a6a90f6ba4ba6eac81f81c41a17cb55cf894ad19adb5b39f8c865adaa73fd1f33735819d8c9cf50096f5065dd82e2805a03da4de2518ded5b1038e383de49cd2e0f1d875f972898381c8ec5bdd731442f5b4bbcdbe1b44086eb25015557557fbdd0de79562c9bbff095f59d71b35082610a5ce3a153e3e59d972d68862cb7ec177991371c72a422c5df2882e098c1b747b960e353e995e36c6832f27d5e29babdf3b74a79719a32447a885e1adfb25a74d6ae8452bfad0ff832a889f47fe7113589bfef48d2b88ea94363a8a836267e6c9eb1db6cd950d8c0f3469c1565b18f56ede3d4ffd2c5b3a878d8b7026a243fc04a2f12148ad0a5807a7d4adb61c6e3ed457bf3219cf03815e589171c6b07426dd0692abdb91a663e3b37b4e11d05181299d63549ee0e0472163a1aa65b45f058d07dccc688dfb20d8523dc2898451ab5c1dc5bbcc4f2aa260b2faf3fab37685fc293b3119deca3edc9ce28eb584cc904b11e745c5319a95798fb0ff46b27c6ef3d3ba843050dd2d2c025cff6db1cea1fe5f9bd94c7e11f6af357ef994368b848b6e03baea57c2d2a0a207f26e7a256c7e50c82b2421fbd1c2c27b82f62497a0212e4828af0f1dc104be93a4a39afc3a576c950ac6d10bae80950b3d02bcde820b384df4a3fa33edbc25aeca61a7f61b28c00764640a0b217619f7394da3c746b5e0ccbfe2429ebce3932b8600de6dc243251c7de5a7707669d0ffb9fac49a146f5924805ce2a70c282bd5584fa45b65eed893c80c8fb3f4b86baf4c3bb7cd788a1461944c51d8896a38690be2b6497fe5ef791a11f8121a5b94c554883eeaa972de6e6c314dbe35978b52787668ba1c2e0c94dfec878b4c37f8dd840b6be1c425f92b94fb79d1261bd36a573f17a6b3a474714720255174233360342d96b66fdea23dc1f22a54c3f93e6ebcde31096b7664e3fc2c74db439b8dfee46583bd1b203a1d8746682a6a8413d8d0f84e2075ad08c4aaf26b0f27205d95aedaabc00de025a6ab48052be521561e6d09807e551b1ea9141fe52791df4553b7567d1c72b236f545fec240d1a5a641cd1ce94ea7812a8764d98566e5656875d8822586bb2c2c6a4877b5a7f3a1f16f46b86bd7f2b06e4db1abe4cd3c4263b559bbb68a25510b8f605d191bc229212d06a753ac3b57c7635ec0a88c5f17f6a98fae26b24d21d710c1c81c511ac12f427c70f77dbf003210873b35ce1c8e29fc1928eb8e9823c1b7707192ea6e00b247ef545ced0b62884ec974dbc1f28abd7ed00c9e1d33bb8041d9d51c4629c760f6e9b87f41138ca45e0a111dcdb575f331686f54b60694b295fcdaf7b3b8df7e1bb3693214092d0780e3b8e0fea3f935131f8274bc1348688df4fbcf983b2a7c5d035fa94cbe6d7cfff32a59fbda1a9f4fb3a494050ac41fd9946ce1b8c733e737c2327ce99ac27a621967f9940394202937f1b48970d6ca533673f
7476aa3f80c18737e60dd27c136311983642789ac2f710397eee32bfd63f0abdfd18a61f12e1701b2d2b3e55870f36b683b91dc205b7aa96f99621b238745a05e646ff3427da96e1ce8f6733e54f6683893baeac0e83c410b0ced1824b26c6ae341f74864824bbe83507d70b689c3ff2486a3dab0ff82a621d5771fc31d9b1b4d652c2bf3c86086561e0322359a53d2e736a1bd8cb47e28a328b4f1fb108f57cf536440c86ba7d12a71e512299281846543eab0bd02261f4bf73902589c7b5c89071a77638c523728286088bab0ad194c44bb6e8bb08a9915182b61aa3ef11e7e1cf418895b2ee5adff7c6ea71b05084824535432cc9d45ef980ad5e001a882916318166df12185122937799a7545a1821e67c838a6e7c3338c4103cd6d7f364468867da71a38f181ed0a01d41282d6d270b1cfb5a885941d2484e9da9d6fb6ab39e0381ded7863f8161b8ca38760cb0643c9d7691bffcff4d71522dd86cbac102c34ab5907a1a818febc5c65d65e51381f845afebd3d0249a8a1890017e7dd1ae7b0c508b6a7d5525d2c3f8321dd43a7ad7c23d483778739a33fda33f7636f2301f7d100092576a66229e701834cc426ece28d3de1939360c294a6ca9903146fd4449df8ac56830c5759c1512211571d7c91d7f53499f1545ce4e0c99385c960750b1b6474690adb28a5cfbfb54f0feb3492e74af1cb829cb3942e04a4f6fdbb0004d170e535d059f8c51255c3ba8981355d4d2a8c7396412572a0068e701539be49e4253b06900ff2f9bd02c197d831", 0x1000}, {&(0x7f0000001680)="cd5abeccf78bb129da687e638e9aa4980777db027eb8e6ff4bf405684aafce016154df2ded", 0x25}, {&(0x7f00000016c0)="e0d9fb571271e7c2e4818e7e2fd1fed7590fa04e1137b5e1dbca0a3896fb29b9bf55813d22c381758916beefd5fc8dfafaf3824f19acc3d0218260166fb57950c6c9c23ca817a1bfa07df1936f2f177e2e29609b02ddecb8ba3c4a3082937b4d996eee30061d35460f899f17e3dd334b308d98289a12aff49c7a53785984761ddeec3d5ebd49a785a056baef0f577e1a7a1c656d00023397d0e39d0d32901aa1e04f0d1e6250755a53f2c2f5f381c6bdc085285c40a5b1c2d4970581c893fb1eaed11f43909badc1da7cc39cbfa1ad2c50b4a8bc59f52cd494b0d51ca93b53a4fcf4df16f5", 0xe5}], 0x3}}, {{&(0x7f0000001800)={0x2, 0x4e20, @dev={0xac, 0x14, 0x14, 0x18}}, 0x10, &(0x7f0000001cc0)=[{&(0x7f0000001840)="9a898dc601f47696cc62c63bf283daf15725e2875b257b7e767a3a2068cc45c20b71d4cc44d845e5b797b55dfb14bc8b25a0fdfecb412c54add392361e0eabdc1459e668976aa92e89f9e4e0be99e6592d8a421cd82694c98072ef0adba04ce56a8552f405fad670c7378945bf5be5632e3ef0e023f7bf27508a7833044d8ce8bf85673362176bb3104bd7b7aa434fa5d585c3fa3fbc2fe50b396c7c0817ddf3934f31b211de8b896ef7d333a1f55943705b771b252d14330b44ddb23ce78522b888289056c49a973185", 0xca}, {&(0x7f0000001940)="2b1a12d822554c41bf293dc02172a3f5746d18f8a48277c0d67e083264109149febab0454e0a135a4b03cb4a870d83fd4b7edb1f61a1fdfd3bdd2cc78db45ebc9a7516c3609be38abb7df7cc2ab0f0c766191b17c9c6e815cb3ab53cbf5e5f5b6f5c54b4052fd78ddcb177053abc82", 0x6f}, {&(0x7f00000019c0)="b316d6cad8674aa5baee516b9aaef9102a94fa59325c9cd3f4dd57fb45359dc7d4c21a7022f6d37ea73b6734d38c6a1e7cb7685cab04a548e7a77d186410c9937b5f58f7af36a535e2ead100014ac33d61e54d7f6227e567845fd41bf83a0b5abe5b4fd68ac301f9d7fcacf90df9faaa4d22aacd60db71798e665bb08d70eb26dbd7eba9108a135d1b51408bed3cdcfe6900ebd3581eefed35aae079e023c2a373964f3f34387e1ee68667a612f9af647d6f77befd9326437e0a66fce174e60ced405ff0eb63676a661bbfe16075c041725de61e628326981ed2785088f5c11b", 0xe0}, {&(0x7f0000001ac0)="7f4ab5a9ff0d9379eb8a6d8b02d9bdc4f6169dbd8794aa6994f310318b068f08aa2bcc6df2a69c3e463d7ef726ad27102f0fb8d8112e9cdd27d03e6bc4abb322c15a26d3339f57a492edc80f8f1ef9a5a1cb477152f42275879ca0d12c2aed8c35a084498bff7e5cda1478cc55b91c5c57c3f162fae3b57c081e10f59f51bbcbb517ccd113f07e43b4d992216487499d6baeb058cb7949ebb7d4ad46e015d2749fd25bb2e241c6f0a06fe8554e551ac7cbcd084a3aada5ca73903bc8c5bd", 0xbe}, {&(0x7f0000001b80)="367089a2fd8081982423736879f95a497ff068632920f3e3af2ad7a2f7cca1", 0x1f}, 
{&(0x7f0000001bc0)="1fb0b276de9a9dd6ce297c49b87b1518779b75abf9721e48fae3f9fb857675261dbdc9a31522bfb310d3f35fff0ac009dee6bf2f051ce1ffdba3fb7d403cb7ec1bc6baf8bbfbddf1cbe2d0549a1c0476cf393d2eaac14488fdd3460ec80d2d4b9505d8bc503ce36ab482870b4e054b16a3cbffca07945a53e22ba6e8bf8eb69782278eb480d5e30bfc26e12f1d64c27d35c0903a3adf92ce59806c8fe734be0d961ce8342117b618201f344c14fd12b49fc5f1487989ea7759f527a2babf61812e0006e17dbe433eccc3987e2f20a378a39989bfde55b0397cd1ad", 0xdb}], 0x6, &(0x7f0000002d80)=[@ip_tos_u8={{0x11, 0x0, 0x1, 0x1}}], 0x18}}, {{0x0, 0x0, &(0x7f0000002ec0)=[{&(0x7f0000002dc0)="0b6eca461bd70e01d19b7ad22f7e32c3c8cfe40d80ea3f5fd61900dda9d525c5a29a02054155ae160d266b0a88fca90067f2c805287592edb537e7affa4663e4e5cfde9464135268aa1cf5468257846a429f33433e28c56d27d5181691635812723b3dd0de2f99d020f59c6fc3feb8232f45857f6be13655fd0f0b3605ca6f312c2d2a9153e3b3d7510cd932a585f12c468b98bbf515b15e79b69202e556bbe96bdfd62b741f16d2f848", 0xaa}, {&(0x7f0000002e80)="9702b6e9d94830306fdb5dce81785e8af507a77cbf", 0x15}], 0x2}}], 0x9, 0x44844) (async) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r6, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) [ 2169.429875][ T7104] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 10:41:45 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xea030000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:41:45 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) (async) r0 = openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) (async) accept(0xffffffffffffffff, 0x0, 0x0) (async) r1 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r2 = openat$cgroup_ro(r1, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r2, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) (async) ioctl$FS_IOC_RESVSP(r2, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) (async) r3 = bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000080)={0x6, 0x4, &(0x7f0000000b40)=ANY=[@ANYBLOB="180200000000000000000095000000000000003a64d725989c4db16f3baf71e8e1e0e0fda53265e4986a8c0c5df4890b46f7846cb7e88700f70c9f5bf25e11ea397e02a20816d06d8cf70fb1ce0f722145d23429d9e7cfb38f06d6534257aa696d61d62cf6a1a78b21361809b6a0395915bdb64fb4b4cb0fad8a71b0bbd7cd3d179cd7e2adb6b26e449c1d44b4b691a0aa23604699a34918f4f3bdb02390cab447693161cfe5ce5e07eae50ceb073b257290594a250afbbfdd2d0fedeca5db427aaf183a78f82a56000000000000ffff396224335038f286ca71f5d1b5dc035167eb7c76d84b5a07fea66f1738894a4ea1ccc9e9049ef948b9e97de4f14e721fa1ba5af84ad400"/275], &(0x7f0000000100)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) bpf$BPF_PROG_TEST_RUN(0xa, 
&(0x7f0000000000)={r3, 0xf000, 0x0, 0x41, 0x0, 0x0, 0x41, 0x0, 0x0, 0x0, 0x0, 0x0}, 0x48) (async) ioctl$F2FS_IOC_MOVE_RANGE(r3, 0xc020f509, &(0x7f0000000140)={r3}) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000400)=ANY=[@ANYRESDEC=r4, @ANYRES32=r2, @ANYRES64=r3], 0x0, 0xfffffffd, 0x0, 0x0, 0x41100, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0xffffffffffffffa8, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) write$tun(r2, 0x0, 0x0) r5 = bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000080)={0x6, 0x4, &(0x7f0000000b40)=ANY=[@ANYBLOB="18020000000000000000000000000000850000007b00000095000000000000003a64d725989c4db16f3baf71e8e1e0e0fda53265e4986a8c0c5df4890b46f7846cb7e88700f70c9f5bf25e11ea397e02a20816d06d8cf70fb1ce0f722145d23429d9e7cfb38f06d6534257aa696d61d62cf6a1a78b21361809b6a0395915bdb64fb487cb0fad8a71b0bbd7cd3d179cd7e2adb6b26e449c1d44b4b691a0aa23604699a34918f4f3bdb02390cab447693161cfe5ce5e07eae50ceb073b257290594a250afbbfdd2d0fedeca5db427aaf183a78f82a56aa9868077e2bb77d396224335038f286ca71f5d1b5dc035167a66f1738894a4ea1ccc9e9049ef948b9e97de4f14e721fa1ba5af84ad4"], &(0x7f0000000100)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) bpf$BPF_PROG_TEST_RUN(0xa, &(0x7f0000000000)={r5, 0xf000, 0x0, 0x41, 0x0, 0x0, 0x41, 0x0, 0x0, 0x0, 0x0, 0x0}, 0x48) ioctl$F2FS_IOC_MOVE_RANGE(r5, 0xc020f509, &(0x7f0000000140)={r5}) epoll_ctl$EPOLL_CTL_MOD(r2, 0x3, r5, &(0x7f0000000000)={0x30000000}) (async) r6 = syz_genetlink_get_family_id$tipc2(&(0x7f0000000300), r4) sendmsg$TIPC_NL_MON_SET(r0, &(0x7f0000000480)={&(0x7f00000002c0)={0x10, 0x0, 0x0, 0x8000}, 0xc, &(0x7f0000000440)={&(0x7f0000000340)={0x80, r6, 0x100, 0x70bd2d, 0x25dfdbfc, {}, [@TIPC_NLA_SOCK={0x6c, 0x2, 0x0, 0x1, [@TIPC_NLA_SOCK_CON={0xc, 0x3, 0x0, 0x1, [@TIPC_NLA_CON_NODE={0x8, 0x2, 0x6}]}, @TIPC_NLA_SOCK_REF={0x8, 0x2, 0x100}, @TIPC_NLA_SOCK_REF={0x8}, @TIPC_NLA_SOCK_CON={0x2c, 0x3, 0x0, 0x1, [@TIPC_NLA_CON_NODE={0x8, 0x2, 0x3}, @TIPC_NLA_CON_FLAG={0x8, 0x1, 0xffff7fff}, @TIPC_NLA_CON_FLAG={0x8}, @TIPC_NLA_CON_FLAG={0x8, 0x1, 0x2}, @TIPC_NLA_CON_FLAG={0x8}]}, @TIPC_NLA_SOCK_REF={0x8, 0x2, 0x8001}, @TIPC_NLA_SOCK_HAS_PUBL={0x4}, @TIPC_NLA_SOCK_ADDR={0x8, 0x1, 0x2}, @TIPC_NLA_SOCK_ADDR={0x8, 0x1, 0x80000001}, @TIPC_NLA_SOCK_HAS_PUBL={0x4}]}]}, 0x80}, 0x1, 0x0, 0x0, 0x8005}, 0x810) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) (async) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) (async) r7 = socket$netlink(0x10, 0x3, 0x0) ioctl$FS_IOC_GETFLAGS(r1, 0x80086601, &(0x7f0000000180)) (async) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) (async) sendmsg$nl_route(r7, 0x0, 0x0) 10:41:45 executing program 4: pipe(&(0x7f00000002c0)={0xffffffffffffffff, 0xffffffffffffffff}) sendmsg$RDMA_NLDEV_CMD_RES_GET(r0, &(0x7f0000000480)={&(0x7f00000003c0)={0x10, 0x0, 0x0, 0x584e5f7cb5ae3b1a}, 0xc, &(0x7f0000000440)={&(0x7f0000000400)={0x20, 0x1409, 0x8, 0x70bd26, 0x25dfdbfc, "", [@RDMA_NLDEV_ATTR_DEV_INDEX={0x8}, @RDMA_NLDEV_ATTR_DEV_INDEX={0x8, 0x1, 0x1}]}, 0x20}}, 0x20000810) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r1, 0x0, 0x8000000000004) r2 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r3 = openat$cgroup_ro(r2, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r3, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) ioctl$FS_IOC_RESVSP(r3, 
0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="79ebf74a5521c4fcc972aff6e1b2c11108f89cc02a767211e12e64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d4f984b8f3250bd7aab2edeb6904", @ANYRES32=r3, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) write$tun(r3, 0x0, 0x0) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) r5 = socket$nl_generic(0x10, 0x3, 0x10) sendfile(r5, r4, 0x0, 0x10000a006) getsockopt$inet6_IPV6_IPSEC_POLICY(r4, 0x29, 0x22, &(0x7f00000005c0)={{{@in6=@dev, @in=@private, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}}, {{@in=@empty}, 0x0, @in6=@dev}}, &(0x7f00000006c0)=0xe8) sendmsg$nl_generic(0xffffffffffffffff, &(0x7f0000000980)={&(0x7f0000000580)={0x10, 0x0, 0x0, 0x4000000}, 0xc, &(0x7f0000000940)={&(0x7f0000000700)={0x238, 0x21, 0x400, 0x70bd2b, 0x25dfdbfb, {0x2}, [@nested={0x9e, 0x8d, 0x0, 0x1, [@generic="dfcbc232952c8b1529afd9e9ffafbc538a8157d5ab9f22ffa3fd2f63ffe4acec99b86866b5cc36f6fb2df7ca55d190128bcf32c3a8f87426debb95bd7b9624139ebaa657cfc92930d2c45575060788ba1d24586abd626b73c0580c00c5512ac6db548b60e98cc93efb9ff948a8338c2b922dc35871164e48d14f4bec990d6b1abb7b6f12ad5f55e3933c0c3290fb4dae3ef739cbc138ace2c96e"]}, @nested={0x181, 0x84, 0x0, 0x1, [@generic="d8e63a536da3c5bb0086b9c4af4736f4b84bd7f60d33c865cefb57d21664dcee43d03fbeb8a9e99c856aa9787516e41f", @generic="9e80cb85750c013c961ab48c30ee618cdaeee0766e2a", @generic="9adae4c44c4610a24ebbfdfe9d0fb6b1f00c744f5898dbaf43ed2882a6ffdf01b1a5df1f6c78299406b36a89a3ed109c81338eae23c777ff", @typed={0x8, 0x4e, 0x0, 0x0, @uid=r7}, @generic="ce7509ed1f7200415caa876e7df7fd3b5cf8f3fc110e8c4e410701b822b810262ac036ce137812fd1633dabb209194c2201a0b4d7a8a48423133771ed1d25c04f03912c4106e9e232087aedb8088207b27f1694cc72f256c9f47281c1fcef7ae721efef74d4df66e6fb38698ba10720c9d711dbf3358e3994d821fb3adc4220d45d11a809643695936707e6b9f3edaeef0ff6f559d97fb01c2abd19565f69b91d66bd16fda896b5291c09c925ee120556fd4c18617b13915fce990bcccf04f6e08a408db6bff83b19595abaf64495e00a3a2d375e42502102e994c7d95cba54e0604f1f178dc35215fd7c8dfb783f2", @typed={0x8, 0xa, 0x0, 0x0, @ipv4=@loopback}]}]}, 0x238}, 0x1, 0x0, 0x0, 0x4000}, 0x4008081) setsockopt$inet6_IPV6_XFRM_POLICY(r3, 0x29, 0x23, &(0x7f0000000180)={{{@in6=@remote, @in=@private=0xa010100, 0x4e21, 0xfff, 0x4e24, 0x9, 0x2, 0x80, 0x80, 0x3b, 0x0, r7}, {0x2, 0x2, 0x2, 0x0, 0x40, 0x800000, 0xee, 0x80}, {0x7, 0x7f, 0x2, 0xc6}, 0x106, 0x6e6bb5, 0x1, 0x0, 0x0, 0xf498d57fab2e5c95}, {{@in6=@mcast1, 0x4d6, 0x6c}, 0x2, @in6=@dev={0xfe, 0x80, '\x00', 0x34}, 0x3504, 0x3, 0x2, 0x2, 0x9, 0x6, 0x100}}, 0xe8) r8 = openat$cgroup_ro(r1, &(0x7f00000000c0)='net_prio.prioidx\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r8, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) r9 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r9, 0x0, 0x8000000000004) sendmsg$nl_route_sched(r1, &(0x7f0000000540)={&(0x7f00000004c0)={0x10, 0x0, 0x0, 0x4000000}, 0xc, &(0x7f0000000500)={&(0x7f00000009c0)=ANY=[@ANYBLOB="3c062ef900fcdbdf2500000000", @ANYRES32=r6, 
@ANYBLOB="f2ff09000400f3ff0f00ffff0b000100666c6f776572000028000200060025000000000014001000fe8000000000000000000000000000aa050036000000000006000500030000000c0001007463696e64657800d0050200cc050700c80515000b000100706f6c6963650000180502800c00080000feffffffffffff080004005d9700003c000100008000000100000000080000020000000f630000ba01050008000300ffffffff7f02030040000600000000000300000006000000000000000c00080006000000000000003c0001000001000003000000ff00000006000000020000000801ff7fff7f000100000000090001009700090006000000ffffff7f00000000c76400003c000100b10400000500000001000000bb530559000000002002050004000100080000007f0202000080380f86269c7c6a000000050000000600000008000400b790000008000500000000000c0008002e0e0000000000000c000900070000000000000004040300070000008100000001800000090000000400000001000080800000001f0000000002000007000000040000000900000005000000432a0000880000003a000000060000000900000084ae000000002c020000000000010000030000009e0b0000e1ffffff0200000000000000070000004f060000040000000000000007000000ff01000027170000040000000800000001040000860000000700000008000000020000000100000003000000010000800900000000000000050000000300000008000000070000000000000000000000010000000800000003000000ffffffff040000008100000009000000050000006d0b0000010100000900000001000000000100000900000000000080000000000300000001000000000200000500000006000000050000000000010000020000010000000700000085d500001f00000007000000070000009484000004000000000400000200000001000000ff01000002000000010000000300000000000000b2000000050000000400000003000000020000000600000005000000000800000004000008000000ff07000005000000119c00000300000001000000070000007f000000060000000101000005000000060000000400000002000000c0ffffff7fbf0000bfd6000000000000000001000600000007000000000001000080000088050000f9ffffff0700000000000000040000000101000005000000de0e00000000000008000000030000000600000073c50000ff7f00000500000003000000810000000700000005000000020000000900000009000000ffffff7f040000000700000001000100000400000000000007000000020000000000008011000000030000000000000000001c0007000000b70000000800000007000000010100000400000004000000080000001f000000040000000400000000000000f699830f080000000300000002000000010000800180000005000000c46400000000000000000000050000003f0000000010000009000000080000000200000002000000080000000ce3000007000000000000000101000003000000f8ffffffffffff7fa8600000040000000900000009000000020000001c0a0000ff0f0000000400000000327bff7f0000000800000100000008000000ffffff7f0000000001800000dccb0000a47800000400000009000000000000007270000000f0ffff010000000100010000000000790c0000070000000000000007000000090000004500000006000000070000000001000020000000080d0000040000000600000002000000020000001f000000000200000500000000000000030000000100008044ad000007000000c1e8000080000000020000000800000003000000fdffffff00000000810000001f0b000005000000020000000c0009000002000000000000080005000700000087000600cba97a8ecb8c7d88e326ea514585238fbe1b5f07aa99007d8157be4a8dcb839419975627c5242a9664ea98c88b816f6fc20865568a6488bdcdda4a93b85195827ec4d216a433ded473854f212ac709225e7a6c6e3b305d9af5656815c97b1ba54fcbb5e3a7d6046d97fd4bd2d29eec2b5e1ca8d37e640e38676ef622403c2f39e418c7000c00070001000000010000000c0008000300000001000000"], 0x63c}, 0x1, 0x0, 0x0, 0x4000008}, 0x8011) setsockopt$inet_sctp6_SCTP_PARTIAL_DELIVERY_POINT(r9, 0x84, 0x13, &(0x7f0000000100)=0x886c, 0x4) pipe(&(0x7f0000000140)={0xffffffffffffffff, 0xffffffffffffffff}) sendfile(0xffffffffffffffff, 0xffffffffffffffff, 0x0, 0x8000000000004) preadv(0xffffffffffffffff, &(0x7f0000001580)=[{&(0x7f0000001080)=""/201, 0xc9}, {&(0x7f0000001180)=""/227, 0xe3}, 
{&(0x7f0000001280)=""/248, 0xf8}, {&(0x7f0000001380)=""/10, 0xa}, {&(0x7f00000013c0)}, {&(0x7f0000001d80)=""/4096, 0x1000}, {&(0x7f0000001400)=""/93, 0x5d}, {&(0x7f0000001480)=""/35, 0x23}, {&(0x7f00000014c0)=""/180, 0xb4}], 0x9, 0x0, 0x5) close(r10) mmap(&(0x7f0000ffa000/0x4000)=nil, 0x4000, 0x2000002, 0x1010, r10, 0xdbafc000) getsockopt$inet_sctp_SCTP_MAXSEG(r11, 0x84, 0xd, &(0x7f0000001000), &(0x7f0000001040)=0x4) ioctl$BTRFS_IOC_SET_FEATURES(r9, 0x40309439, &(0x7f0000000080)={0x1, 0x3, 0x2}) openat$cgroup_ro(r1, &(0x7f0000000000)='hugetlb.2MB.usage_in_bytes\x00', 0x0, 0x0) [ 2169.619623][ T7104] 8021q: adding VLAN 0 to HW filter on device bond1396 10:41:45 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) (async) r0 = openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) (async) accept(0xffffffffffffffff, 0x0, 0x0) r1 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r2 = openat$cgroup_ro(r1, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r2, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) ioctl$FS_IOC_RESVSP(r2, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) (async) r3 = bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000080)={0x6, 0x4, &(0x7f0000000b40)=ANY=[@ANYBLOB="180200000000000000000095000000000000003a64d725989c4db16f3baf71e8e1e0e0fda53265e4986a8c0c5df4890b46f7846cb7e88700f70c9f5bf25e11ea397e02a20816d06d8cf70fb1ce0f722145d23429d9e7cfb38f06d6534257aa696d61d62cf6a1a78b21361809b6a0395915bdb64fb4b4cb0fad8a71b0bbd7cd3d179cd7e2adb6b26e449c1d44b4b691a0aa23604699a34918f4f3bdb02390cab447693161cfe5ce5e07eae50ceb073b257290594a250afbbfdd2d0fedeca5db427aaf183a78f82a56000000000000ffff396224335038f286ca71f5d1b5dc035167eb7c76d84b5a07fea66f1738894a4ea1ccc9e9049ef948b9e97de4f14e721fa1ba5af84ad400"/275], &(0x7f0000000100)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) bpf$BPF_PROG_TEST_RUN(0xa, &(0x7f0000000000)={r3, 0xf000, 0x0, 0x41, 0x0, 0x0, 0x41, 0x0, 0x0, 0x0, 0x0, 0x0}, 0x48) (async) ioctl$F2FS_IOC_MOVE_RANGE(r3, 0xc020f509, &(0x7f0000000140)={r3}) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000400)=ANY=[@ANYRESDEC=r4, @ANYRES32=r2, @ANYRES64=r3], 0x0, 0xfffffffd, 0x0, 0x0, 0x41100, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0xffffffffffffffa8, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) (async, rerun: 32) write$tun(r2, 0x0, 0x0) (async, rerun: 32) r5 = bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000080)={0x6, 0x4, &(0x7f0000000b40)=ANY=[@ANYBLOB="18020000000000000000000000000000850000007b00000095000000000000003a64d725989c4db16f3baf71e8e1e0e0fda53265e4986a8c0c5df4890b46f7846cb7e88700f70c9f5bf25e11ea397e02a20816d06d8cf70fb1ce0f722145d23429d9e7cfb38f06d6534257aa696d61d62cf6a1a78b21361809b6a0395915bdb64fb487cb0fad8a71b0bbd7cd3d179cd7e2adb6b26e449c1d44b4b691a0aa23604699a34918f4f3bdb02390cab447693161cfe5ce5e07eae50ceb073b257290594a250afbbfdd2d0fedeca5db427aaf183a78f82a56aa9868077e2bb77d396224335038f286ca71f5d1b5dc035167a66f1738894a4ea1ccc9e9049ef948b9e97de4f14e721fa1ba5af84ad4"], &(0x7f0000000100)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) bpf$BPF_PROG_TEST_RUN(0xa, &(0x7f0000000000)={r5, 0xf000, 0x0, 0x41, 0x0, 0x0, 0x41, 0x0, 0x0, 0x0, 0x0, 0x0}, 0x48) ioctl$F2FS_IOC_MOVE_RANGE(r5, 0xc020f509, &(0x7f0000000140)={r5}) (async) epoll_ctl$EPOLL_CTL_MOD(r2, 0x3, r5, &(0x7f0000000000)={0x30000000}) (async, 
rerun: 64) r6 = syz_genetlink_get_family_id$tipc2(&(0x7f0000000300), r4) (rerun: 64) sendmsg$TIPC_NL_MON_SET(r0, &(0x7f0000000480)={&(0x7f00000002c0)={0x10, 0x0, 0x0, 0x8000}, 0xc, &(0x7f0000000440)={&(0x7f0000000340)={0x80, r6, 0x100, 0x70bd2d, 0x25dfdbfc, {}, [@TIPC_NLA_SOCK={0x6c, 0x2, 0x0, 0x1, [@TIPC_NLA_SOCK_CON={0xc, 0x3, 0x0, 0x1, [@TIPC_NLA_CON_NODE={0x8, 0x2, 0x6}]}, @TIPC_NLA_SOCK_REF={0x8, 0x2, 0x100}, @TIPC_NLA_SOCK_REF={0x8}, @TIPC_NLA_SOCK_CON={0x2c, 0x3, 0x0, 0x1, [@TIPC_NLA_CON_NODE={0x8, 0x2, 0x3}, @TIPC_NLA_CON_FLAG={0x8, 0x1, 0xffff7fff}, @TIPC_NLA_CON_FLAG={0x8}, @TIPC_NLA_CON_FLAG={0x8, 0x1, 0x2}, @TIPC_NLA_CON_FLAG={0x8}]}, @TIPC_NLA_SOCK_REF={0x8, 0x2, 0x8001}, @TIPC_NLA_SOCK_HAS_PUBL={0x4}, @TIPC_NLA_SOCK_ADDR={0x8, 0x1, 0x2}, @TIPC_NLA_SOCK_ADDR={0x8, 0x1, 0x80000001}, @TIPC_NLA_SOCK_HAS_PUBL={0x4}]}]}, 0x80}, 0x1, 0x0, 0x0, 0x8005}, 0x810) (async) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) (async) r7 = socket$netlink(0x10, 0x3, 0x0) (async) ioctl$FS_IOC_GETFLAGS(r1, 0x80086601, &(0x7f0000000180)) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r7, 0x0, 0x0) 10:41:45 executing program 4: pipe(&(0x7f00000002c0)={0xffffffffffffffff, 0xffffffffffffffff}) sendmsg$RDMA_NLDEV_CMD_RES_GET(r0, &(0x7f0000000480)={&(0x7f00000003c0)={0x10, 0x0, 0x0, 0x584e5f7cb5ae3b1a}, 0xc, &(0x7f0000000440)={&(0x7f0000000400)={0x20, 0x1409, 0x8, 0x70bd26, 0x25dfdbfc, "", [@RDMA_NLDEV_ATTR_DEV_INDEX={0x8}, @RDMA_NLDEV_ATTR_DEV_INDEX={0x8, 0x1, 0x1}]}, 0x20}}, 0x20000810) (async) sendmsg$RDMA_NLDEV_CMD_RES_GET(r0, &(0x7f0000000480)={&(0x7f00000003c0)={0x10, 0x0, 0x0, 0x584e5f7cb5ae3b1a}, 0xc, &(0x7f0000000440)={&(0x7f0000000400)={0x20, 0x1409, 0x8, 0x70bd26, 0x25dfdbfc, "", [@RDMA_NLDEV_ATTR_DEV_INDEX={0x8}, @RDMA_NLDEV_ATTR_DEV_INDEX={0x8, 0x1, 0x1}]}, 0x20}}, 0x20000810) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r1, 0x0, 0x8000000000004) (async) sendfile(0xffffffffffffffff, r1, 0x0, 0x8000000000004) r2 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r3 = openat$cgroup_ro(r2, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r3, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) ioctl$FS_IOC_RESVSP(r3, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) (async) ioctl$FS_IOC_RESVSP(r3, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="79ebf74a5521c4fcc972aff6e1b2c11108f89cc02a767211e12e64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d4f984b8f3250bd7aab2edeb6904", @ANYRES32=r3, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) write$tun(r3, 0x0, 0x0) (async) write$tun(r3, 0x0, 0x0) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) (async) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) r5 = socket$nl_generic(0x10, 0x3, 0x10) sendfile(r5, r4, 0x0, 
0x10000a006) getsockopt$inet6_IPV6_IPSEC_POLICY(r4, 0x29, 0x22, &(0x7f00000005c0)={{{@in6=@dev, @in=@private}}, {{@in=@empty}, 0x0, @in6=@dev}}, &(0x7f00000006c0)=0xe8) (async) getsockopt$inet6_IPV6_IPSEC_POLICY(r4, 0x29, 0x22, &(0x7f00000005c0)={{{@in6=@dev, @in=@private, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}}, {{@in=@empty}, 0x0, @in6=@dev}}, &(0x7f00000006c0)=0xe8) sendmsg$nl_generic(0xffffffffffffffff, &(0x7f0000000980)={&(0x7f0000000580)={0x10, 0x0, 0x0, 0x4000000}, 0xc, &(0x7f0000000940)={&(0x7f0000000700)={0x238, 0x21, 0x400, 0x70bd2b, 0x25dfdbfb, {0x2}, [@nested={0x9e, 0x8d, 0x0, 0x1, [@generic="dfcbc232952c8b1529afd9e9ffafbc538a8157d5ab9f22ffa3fd2f63ffe4acec99b86866b5cc36f6fb2df7ca55d190128bcf32c3a8f87426debb95bd7b9624139ebaa657cfc92930d2c45575060788ba1d24586abd626b73c0580c00c5512ac6db548b60e98cc93efb9ff948a8338c2b922dc35871164e48d14f4bec990d6b1abb7b6f12ad5f55e3933c0c3290fb4dae3ef739cbc138ace2c96e"]}, @nested={0x181, 0x84, 0x0, 0x1, [@generic="d8e63a536da3c5bb0086b9c4af4736f4b84bd7f60d33c865cefb57d21664dcee43d03fbeb8a9e99c856aa9787516e41f", @generic="9e80cb85750c013c961ab48c30ee618cdaeee0766e2a", @generic="9adae4c44c4610a24ebbfdfe9d0fb6b1f00c744f5898dbaf43ed2882a6ffdf01b1a5df1f6c78299406b36a89a3ed109c81338eae23c777ff", @typed={0x8, 0x4e, 0x0, 0x0, @uid=r7}, @generic="ce7509ed1f7200415caa876e7df7fd3b5cf8f3fc110e8c4e410701b822b810262ac036ce137812fd1633dabb209194c2201a0b4d7a8a48423133771ed1d25c04f03912c4106e9e232087aedb8088207b27f1694cc72f256c9f47281c1fcef7ae721efef74d4df66e6fb38698ba10720c9d711dbf3358e3994d821fb3adc4220d45d11a809643695936707e6b9f3edaeef0ff6f559d97fb01c2abd19565f69b91d66bd16fda896b5291c09c925ee120556fd4c18617b13915fce990bcccf04f6e08a408db6bff83b19595abaf64495e00a3a2d375e42502102e994c7d95cba54e0604f1f178dc35215fd7c8dfb783f2", @typed={0x8, 0xa, 0x0, 0x0, @ipv4=@loopback}]}]}, 0x238}, 0x1, 0x0, 0x0, 0x4000}, 0x4008081) (async) sendmsg$nl_generic(0xffffffffffffffff, &(0x7f0000000980)={&(0x7f0000000580)={0x10, 0x0, 0x0, 0x4000000}, 0xc, &(0x7f0000000940)={&(0x7f0000000700)={0x238, 0x21, 0x400, 0x70bd2b, 0x25dfdbfb, {0x2}, [@nested={0x9e, 0x8d, 0x0, 0x1, [@generic="dfcbc232952c8b1529afd9e9ffafbc538a8157d5ab9f22ffa3fd2f63ffe4acec99b86866b5cc36f6fb2df7ca55d190128bcf32c3a8f87426debb95bd7b9624139ebaa657cfc92930d2c45575060788ba1d24586abd626b73c0580c00c5512ac6db548b60e98cc93efb9ff948a8338c2b922dc35871164e48d14f4bec990d6b1abb7b6f12ad5f55e3933c0c3290fb4dae3ef739cbc138ace2c96e"]}, @nested={0x181, 0x84, 0x0, 0x1, [@generic="d8e63a536da3c5bb0086b9c4af4736f4b84bd7f60d33c865cefb57d21664dcee43d03fbeb8a9e99c856aa9787516e41f", @generic="9e80cb85750c013c961ab48c30ee618cdaeee0766e2a", @generic="9adae4c44c4610a24ebbfdfe9d0fb6b1f00c744f5898dbaf43ed2882a6ffdf01b1a5df1f6c78299406b36a89a3ed109c81338eae23c777ff", @typed={0x8, 0x4e, 0x0, 0x0, @uid=r7}, @generic="ce7509ed1f7200415caa876e7df7fd3b5cf8f3fc110e8c4e410701b822b810262ac036ce137812fd1633dabb209194c2201a0b4d7a8a48423133771ed1d25c04f03912c4106e9e232087aedb8088207b27f1694cc72f256c9f47281c1fcef7ae721efef74d4df66e6fb38698ba10720c9d711dbf3358e3994d821fb3adc4220d45d11a809643695936707e6b9f3edaeef0ff6f559d97fb01c2abd19565f69b91d66bd16fda896b5291c09c925ee120556fd4c18617b13915fce990bcccf04f6e08a408db6bff83b19595abaf64495e00a3a2d375e42502102e994c7d95cba54e0604f1f178dc35215fd7c8dfb783f2", @typed={0x8, 0xa, 0x0, 0x0, @ipv4=@loopback}]}]}, 0x238}, 0x1, 0x0, 0x0, 0x4000}, 0x4008081) setsockopt$inet6_IPV6_XFRM_POLICY(r3, 0x29, 0x23, &(0x7f0000000180)={{{@in6=@remote, @in=@private=0xa010100, 0x4e21, 0xfff, 0x4e24, 0x9, 0x2, 0x80, 0x80, 0x3b, 
0x0, r7}, {0x2, 0x2, 0x2, 0x0, 0x40, 0x800000, 0xee, 0x80}, {0x7, 0x7f, 0x2, 0xc6}, 0x106, 0x6e6bb5, 0x1, 0x0, 0x0, 0xf498d57fab2e5c95}, {{@in6=@mcast1, 0x4d6, 0x6c}, 0x2, @in6=@dev={0xfe, 0x80, '\x00', 0x34}, 0x3504, 0x3, 0x2, 0x2, 0x9, 0x6, 0x100}}, 0xe8) r8 = openat$cgroup_ro(r1, &(0x7f00000000c0)='net_prio.prioidx\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r8, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) r9 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r9, 0x0, 0x8000000000004) sendmsg$nl_route_sched(r1, &(0x7f0000000540)={&(0x7f00000004c0)={0x10, 0x0, 0x0, 0x4000000}, 0xc, &(0x7f0000000500)={&(0x7f00000009c0)=ANY=[@ANYBLOB="3c062ef900fcdbdf2500000000", @ANYRES32=r6, @ANYBLOB="f2ff09000400f3ff0f00ffff0b000100666c6f776572000028000200060025000000000014001000fe8000000000000000000000000000aa050036000000000006000500030000000c0001007463696e64657800d0050200cc050700c80515000b000100706f6c6963650000180502800c00080000feffffffffffff080004005d9700003c000100008000000100000000080000020000000f630000ba01050008000300ffffffff7f02030040000600000000000300000006000000000000000c00080006000000000000003c0001000001000003000000ff00000006000000020000000801ff7fff7f000100000000090001009700090006000000ffffff7f00000000c76400003c000100b10400000500000001000000bb530559000000002002050004000100080000007f0202000080380f86269c7c6a000000050000000600000008000400b790000008000500000000000c0008002e0e0000000000000c000900070000000000000004040300070000008100000001800000090000000400000001000080800000001f0000000002000007000000040000000900000005000000432a0000880000003a000000060000000900000084ae000000002c020000000000010000030000009e0b0000e1ffffff0200000000000000070000004f060000040000000000000007000000ff01000027170000040000000800000001040000860000000700000008000000020000000100000003000000010000800900000000000000050000000300000008000000070000000000000000000000010000000800000003000000ffffffff040000008100000009000000050000006d0b0000010100000900000001000000000100000900000000000080000000000300000001000000000200000500000006000000050000000000010000020000010000000700000085d500001f00000007000000070000009484000004000000000400000200000001000000ff01000002000000010000000300000000000000b2000000050000000400000003000000020000000600000005000000000800000004000008000000ff07000005000000119c00000300000001000000070000007f000000060000000101000005000000060000000400000002000000c0ffffff7fbf0000bfd6000000000000000001000600000007000000000001000080000088050000f9ffffff0700000000000000040000000101000005000000de0e00000000000008000000030000000600000073c50000ff7f00000500000003000000810000000700000005000000020000000900000009000000ffffff7f040000000700000001000100000400000000000007000000020000000000008011000000030000000000000000001c0007000000b70000000800000007000000010100000400000004000000080000001f000000040000000400000000000000f699830f080000000300000002000000010000800180000005000000c46400000000000000000000050000003f0000000010000009000000080000000200000002000000080000000ce3000007000000000000000101000003000000f8ffffffffffff7fa8600000040000000900000009000000020000001c0a0000ff0f0000000400000000327bff7f0000000800000100000008000000ffffff7f0000000001800000dccb0000a47800000400000009000000000000007270000000f0ffff010000000100010000000000790c0000070000000000000007000000090000004500000006000000070000000001000020000000080d0000040000000600000002000000020000001f000000000200000500000000000000030000000100008044ad0
00007000000c1e8000080000000020000000800000003000000fdffffff00000000810000001f0b000005000000020000000c0009000002000000000000080005000700000087000600cba97a8ecb8c7d88e326ea514585238fbe1b5f07aa99007d8157be4a8dcb839419975627c5242a9664ea98c88b816f6fc20865568a6488bdcdda4a93b85195827ec4d216a433ded473854f212ac709225e7a6c6e3b305d9af5656815c97b1ba54fcbb5e3a7d6046d97fd4bd2d29eec2b5e1ca8d37e640e38676ef622403c2f39e418c7000c00070001000000010000000c0008000300000001000000"], 0x63c}, 0x1, 0x0, 0x0, 0x4000008}, 0x8011) setsockopt$inet_sctp6_SCTP_PARTIAL_DELIVERY_POINT(r9, 0x84, 0x13, &(0x7f0000000100)=0x886c, 0x4) pipe(&(0x7f0000000140)={0xffffffffffffffff, 0xffffffffffffffff}) sendfile(0xffffffffffffffff, 0xffffffffffffffff, 0x0, 0x8000000000004) (async) sendfile(0xffffffffffffffff, 0xffffffffffffffff, 0x0, 0x8000000000004) preadv(0xffffffffffffffff, &(0x7f0000001580)=[{&(0x7f0000001080)=""/201, 0xc9}, {&(0x7f0000001180)=""/227, 0xe3}, {&(0x7f0000001280)=""/248, 0xf8}, {&(0x7f0000001380)=""/10, 0xa}, {&(0x7f00000013c0)}, {&(0x7f0000001d80)=""/4096, 0x1000}, {&(0x7f0000001400)=""/93, 0x5d}, {&(0x7f0000001480)=""/35, 0x23}, {&(0x7f00000014c0)=""/180, 0xb4}], 0x9, 0x0, 0x5) close(r10) mmap(&(0x7f0000ffa000/0x4000)=nil, 0x4000, 0x2000002, 0x1010, r10, 0xdbafc000) (async) mmap(&(0x7f0000ffa000/0x4000)=nil, 0x4000, 0x2000002, 0x1010, r10, 0xdbafc000) getsockopt$inet_sctp_SCTP_MAXSEG(r11, 0x84, 0xd, &(0x7f0000001000), &(0x7f0000001040)=0x4) ioctl$BTRFS_IOC_SET_FEATURES(r9, 0x40309439, &(0x7f0000000080)={0x1, 0x3, 0x2}) openat$cgroup_ro(r1, &(0x7f0000000000)='hugetlb.2MB.usage_in_bytes\x00', 0x0, 0x0) [ 2169.701614][ T7148] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further 10:41:46 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xc2030000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:41:46 executing program 4: pipe(&(0x7f00000002c0)={0xffffffffffffffff, 0xffffffffffffffff}) sendmsg$RDMA_NLDEV_CMD_RES_GET(r0, &(0x7f0000000480)={&(0x7f00000003c0)={0x10, 0x0, 0x0, 0x584e5f7cb5ae3b1a}, 0xc, &(0x7f0000000440)={&(0x7f0000000400)={0x20, 0x1409, 0x8, 0x70bd26, 0x25dfdbfc, "", [@RDMA_NLDEV_ATTR_DEV_INDEX={0x8}, @RDMA_NLDEV_ATTR_DEV_INDEX={0x8, 0x1, 0x1}]}, 0x20}}, 0x20000810) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r1, 0x0, 0x8000000000004) (async) r2 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r3 = openat$cgroup_ro(r2, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r3, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) (async) ioctl$FS_IOC_RESVSP(r3, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, 
&(0x7f0000000300)=ANY=[@ANYBLOB="79ebf74a5521c4fcc972aff6e1b2c11108f89cc02a767211e12e64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d4f984b8f3250bd7aab2edeb6904", @ANYRES32=r3, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) (async) write$tun(r3, 0x0, 0x0) (async) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) (async) r5 = socket$nl_generic(0x10, 0x3, 0x10) sendfile(r5, r4, 0x0, 0x10000a006) getsockopt$inet6_IPV6_IPSEC_POLICY(r4, 0x29, 0x22, &(0x7f00000005c0)={{{@in6=@dev, @in=@private, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}}, {{@in=@empty}, 0x0, @in6=@dev}}, &(0x7f00000006c0)=0xe8) sendmsg$nl_generic(0xffffffffffffffff, &(0x7f0000000980)={&(0x7f0000000580)={0x10, 0x0, 0x0, 0x4000000}, 0xc, &(0x7f0000000940)={&(0x7f0000000700)={0x238, 0x21, 0x400, 0x70bd2b, 0x25dfdbfb, {0x2}, [@nested={0x9e, 0x8d, 0x0, 0x1, [@generic="dfcbc232952c8b1529afd9e9ffafbc538a8157d5ab9f22ffa3fd2f63ffe4acec99b86866b5cc36f6fb2df7ca55d190128bcf32c3a8f87426debb95bd7b9624139ebaa657cfc92930d2c45575060788ba1d24586abd626b73c0580c00c5512ac6db548b60e98cc93efb9ff948a8338c2b922dc35871164e48d14f4bec990d6b1abb7b6f12ad5f55e3933c0c3290fb4dae3ef739cbc138ace2c96e"]}, @nested={0x181, 0x84, 0x0, 0x1, [@generic="d8e63a536da3c5bb0086b9c4af4736f4b84bd7f60d33c865cefb57d21664dcee43d03fbeb8a9e99c856aa9787516e41f", @generic="9e80cb85750c013c961ab48c30ee618cdaeee0766e2a", @generic="9adae4c44c4610a24ebbfdfe9d0fb6b1f00c744f5898dbaf43ed2882a6ffdf01b1a5df1f6c78299406b36a89a3ed109c81338eae23c777ff", @typed={0x8, 0x4e, 0x0, 0x0, @uid=r7}, @generic="ce7509ed1f7200415caa876e7df7fd3b5cf8f3fc110e8c4e410701b822b810262ac036ce137812fd1633dabb209194c2201a0b4d7a8a48423133771ed1d25c04f03912c4106e9e232087aedb8088207b27f1694cc72f256c9f47281c1fcef7ae721efef74d4df66e6fb38698ba10720c9d711dbf3358e3994d821fb3adc4220d45d11a809643695936707e6b9f3edaeef0ff6f559d97fb01c2abd19565f69b91d66bd16fda896b5291c09c925ee120556fd4c18617b13915fce990bcccf04f6e08a408db6bff83b19595abaf64495e00a3a2d375e42502102e994c7d95cba54e0604f1f178dc35215fd7c8dfb783f2", @typed={0x8, 0xa, 0x0, 0x0, @ipv4=@loopback}]}]}, 0x238}, 0x1, 0x0, 0x0, 0x4000}, 0x4008081) (async) setsockopt$inet6_IPV6_XFRM_POLICY(r3, 0x29, 0x23, &(0x7f0000000180)={{{@in6=@remote, @in=@private=0xa010100, 0x4e21, 0xfff, 0x4e24, 0x9, 0x2, 0x80, 0x80, 0x3b, 0x0, r7}, {0x2, 0x2, 0x2, 0x0, 0x40, 0x800000, 0xee, 0x80}, {0x7, 0x7f, 0x2, 0xc6}, 0x106, 0x6e6bb5, 0x1, 0x0, 0x0, 0xf498d57fab2e5c95}, {{@in6=@mcast1, 0x4d6, 0x6c}, 0x2, @in6=@dev={0xfe, 0x80, '\x00', 0x34}, 0x3504, 0x3, 0x2, 0x2, 0x9, 0x6, 0x100}}, 0xe8) (async) r8 = openat$cgroup_ro(r1, &(0x7f00000000c0)='net_prio.prioidx\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r8, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) r9 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r9, 0x0, 0x8000000000004) (async) sendmsg$nl_route_sched(r1, &(0x7f0000000540)={&(0x7f00000004c0)={0x10, 0x0, 0x0, 0x4000000}, 0xc, &(0x7f0000000500)={&(0x7f00000009c0)=ANY=[@ANYBLOB="3c062ef900fcdbdf2500000000", @ANYRES32=r6, 
@ANYBLOB="f2ff09000400f3ff0f00ffff0b000100666c6f776572000028000200060025000000000014001000fe8000000000000000000000000000aa050036000000000006000500030000000c0001007463696e64657800d0050200cc050700c80515000b000100706f6c6963650000180502800c00080000feffffffffffff080004005d9700003c000100008000000100000000080000020000000f630000ba01050008000300ffffffff7f02030040000600000000000300000006000000000000000c00080006000000000000003c0001000001000003000000ff00000006000000020000000801ff7fff7f000100000000090001009700090006000000ffffff7f00000000c76400003c000100b10400000500000001000000bb530559000000002002050004000100080000007f0202000080380f86269c7c6a000000050000000600000008000400b790000008000500000000000c0008002e0e0000000000000c000900070000000000000004040300070000008100000001800000090000000400000001000080800000001f0000000002000007000000040000000900000005000000432a0000880000003a000000060000000900000084ae000000002c020000000000010000030000009e0b0000e1ffffff0200000000000000070000004f060000040000000000000007000000ff01000027170000040000000800000001040000860000000700000008000000020000000100000003000000010000800900000000000000050000000300000008000000070000000000000000000000010000000800000003000000ffffffff040000008100000009000000050000006d0b0000010100000900000001000000000100000900000000000080000000000300000001000000000200000500000006000000050000000000010000020000010000000700000085d500001f00000007000000070000009484000004000000000400000200000001000000ff01000002000000010000000300000000000000b2000000050000000400000003000000020000000600000005000000000800000004000008000000ff07000005000000119c00000300000001000000070000007f000000060000000101000005000000060000000400000002000000c0ffffff7fbf0000bfd6000000000000000001000600000007000000000001000080000088050000f9ffffff0700000000000000040000000101000005000000de0e00000000000008000000030000000600000073c50000ff7f00000500000003000000810000000700000005000000020000000900000009000000ffffff7f040000000700000001000100000400000000000007000000020000000000008011000000030000000000000000001c0007000000b70000000800000007000000010100000400000004000000080000001f000000040000000400000000000000f699830f080000000300000002000000010000800180000005000000c46400000000000000000000050000003f0000000010000009000000080000000200000002000000080000000ce3000007000000000000000101000003000000f8ffffffffffff7fa8600000040000000900000009000000020000001c0a0000ff0f0000000400000000327bff7f0000000800000100000008000000ffffff7f0000000001800000dccb0000a47800000400000009000000000000007270000000f0ffff010000000100010000000000790c0000070000000000000007000000090000004500000006000000070000000001000020000000080d0000040000000600000002000000020000001f000000000200000500000000000000030000000100008044ad000007000000c1e8000080000000020000000800000003000000fdffffff00000000810000001f0b000005000000020000000c0009000002000000000000080005000700000087000600cba97a8ecb8c7d88e326ea514585238fbe1b5f07aa99007d8157be4a8dcb839419975627c5242a9664ea98c88b816f6fc20865568a6488bdcdda4a93b85195827ec4d216a433ded473854f212ac709225e7a6c6e3b305d9af5656815c97b1ba54fcbb5e3a7d6046d97fd4bd2d29eec2b5e1ca8d37e640e38676ef622403c2f39e418c7000c00070001000000010000000c0008000300000001000000"], 0x63c}, 0x1, 0x0, 0x0, 0x4000008}, 0x8011) (async) setsockopt$inet_sctp6_SCTP_PARTIAL_DELIVERY_POINT(r9, 0x84, 0x13, &(0x7f0000000100)=0x886c, 0x4) (async) pipe(&(0x7f0000000140)={0xffffffffffffffff, 0xffffffffffffffff}) sendfile(0xffffffffffffffff, 0xffffffffffffffff, 0x0, 0x8000000000004) (async) preadv(0xffffffffffffffff, &(0x7f0000001580)=[{&(0x7f0000001080)=""/201, 0xc9}, 
{&(0x7f0000001180)=""/227, 0xe3}, {&(0x7f0000001280)=""/248, 0xf8}, {&(0x7f0000001380)=""/10, 0xa}, {&(0x7f00000013c0)}, {&(0x7f0000001d80)=""/4096, 0x1000}, {&(0x7f0000001400)=""/93, 0x5d}, {&(0x7f0000001480)=""/35, 0x23}, {&(0x7f00000014c0)=""/180, 0xb4}], 0x9, 0x0, 0x5) (async) close(r10) (async) mmap(&(0x7f0000ffa000/0x4000)=nil, 0x4000, 0x2000002, 0x1010, r10, 0xdbafc000) (async) getsockopt$inet_sctp_SCTP_MAXSEG(r11, 0x84, 0xd, &(0x7f0000001000), &(0x7f0000001040)=0x4) ioctl$BTRFS_IOC_SET_FEATURES(r9, 0x40309439, &(0x7f0000000080)={0x1, 0x3, 0x2}) (async) openat$cgroup_ro(r1, &(0x7f0000000000)='hugetlb.2MB.usage_in_bytes\x00', 0x0, 0x0) [ 2169.997565][ T7106] bond1396: (slave bridge1261): making interface the new active one [ 2170.014401][ T7106] bond1396: (slave bridge1261): Enslaving as an active interface with an up link [ 2170.028859][ T7124] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 10:41:46 executing program 5: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) writev(r0, &(0x7f0000000340)=[{&(0x7f0000000280)="043caa07f3a3f76d858ace87d769ded7550278376e8c4f6550d9108cbf5189deea33e10b8a7224c6107d53d01f904f58c8e83e60c4192c5a7fbd1a5f9c9fb63ca8dddee8d195355273926702d073752d80d8545e667b5c485a", 0x59}, {&(0x7f0000000100)="e48c29435b087869336f3e74363df6ea882508c8aa487ff564e800ccd724f50e3896c5d551b1a2ec6bb29187299fe2e06c11dfcb2075f9a839b34a4a6b4c2bec", 0x40}, {&(0x7f0000000400)="a318ece45becd6e47ff8375b816640bce6a9ff4d8e83ab791404d351c3ba957f2f3a0220176c17e899248cf03c8a908c4ade8de15d81e79b803df97f6fe5e817c2efde3befe034a7bc6d52a0c1caf88dc7d2077e40879876ce4aa8d6273615939a97526b8cb409d0c58f8075db16b3aed80bd44efb660be17d9db07a801a43b59a034e9998a5d60b072ab0fe5dd97a86e25568ab6d9d74c7af31171f38eaff0cb0fc24300b48212ec6608ef820fc964a28db4cd8d0526d550efac04bc60f4e4cd3c224cbe0ba45f28e0566930afbca59424e39854793c6e301fdc9cedeb00500db4af5f6c2004568aa4682fc656b1943bb63", 0xf2}, {&(0x7f0000000500)="a1a8c7128cf53b860b5aaa1ccee4ffe846420e0192cfed6fed4eeddf537179b0a83627c3868f3b78b60a61263a90957f69337b2ab0a123218ed0318185135c095b602df299adfc458eef4947ab34782ecdbae8436486efca9d9076f2316af18200a3014efba92706cc1748f7c5925bdaa3aa4516b6f1a6f22f4f1c23d2554620b90ad856298d3087e4519d78d14dd2d1dbb2401cfd330b19ac2fc26b2b4837023a585b846cdc0e007773719a6ad0cf3acb3cde81220e1909741522d75d5c49222f1d0ef6d8bd1b58070df04e658553cb734adbdb98c8482330ed25a1d53b5b3a6c09aa436715f3535c84a943", 0xec}], 0x4) accept(0xffffffffffffffff, 0x0, &(0x7f0000000080)) socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x0, 0x803, 0x0) syz_genetlink_get_family_id$mptcp(&(0x7f0000000000), r1) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r3, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r5, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 
0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0x8a400000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x3c}}, 0x0) 10:41:46 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) r1 = socket$inet6_sctp(0xa, 0x5, 0x84) getsockopt$inet_sctp6_SCTP_SOCKOPT_CONNECTX3(r1, 0x84, 0x6f, &(0x7f0000000000)={0x0, 0x1c, &(0x7f0000000080)=[@in6={0xa, 0x0, 0x0, @rand_addr=' \x01\x00'}]}, &(0x7f0000000180)=0x10) r2 = socket$inet_sctp(0x2, 0x1, 0x84) getsockopt$inet_sctp_SCTP_MAX_BURST(r2, 0x84, 0xd, &(0x7f0000000000)=@assoc_value={0x0}, &(0x7f0000000240)=0x8) getsockopt$inet_sctp6_SCTP_PEER_ADDR_PARAMS(r1, 0x84, 0x9, &(0x7f00000000c0)={r3, @in6={{0xa, 0x0, 0x0, @rand_addr=' \x01\x00'}}}, &(0x7f00000001c0)=0x9c) r4 = socket$inet6_sctp(0xa, 0x5, 0x84) getsockopt$inet_sctp6_SCTP_SOCKOPT_CONNECTX3(r4, 0x84, 0x6f, &(0x7f0000000000)={0x0, 0x1c, &(0x7f0000000080)=[@in6={0xa, 0x0, 0x0, @rand_addr=' \x01\x00'}]}, &(0x7f0000000180)=0x10) r5 = socket$inet_sctp(0x2, 0x1, 0x84) getsockopt$inet_sctp_SCTP_MAX_BURST(r5, 0x84, 0xd, &(0x7f0000000000)=@assoc_value={0x0}, &(0x7f0000000240)=0x8) getsockopt$inet_sctp6_SCTP_PEER_ADDR_PARAMS(r4, 0x84, 0x9, &(0x7f00000000c0)={r6, @in6={{0xa, 0x0, 0x0, @rand_addr=' \x01\x00'}}}, &(0x7f00000001c0)=0x9c) sendmmsg$inet_sctp(0xffffffffffffffff, &(0x7f0000002500)=[{&(0x7f0000000080)=@in6={0xa, 0x4e21, 0x8, @ipv4={'\x00', '\xff\xff', @dev={0xac, 0x14, 0x14, 0x22}}, 0x8}, 0x1c, &(0x7f00000000c0)=[{&(0x7f0000000180)="3d65fa7e48adeec0fd3c489625d4590724a2f28c77ada8c7de8af7252e9550e02b72579a5606a174fff5fa29ffedf53c42161d683b2f1db5c9fc263c5dcf4b79c9a07ec58eb4443e6a7b1f6b76c0c400784326a4df70458a2e870c8a5d55618c1e14b31a066bbeb8e31c1842486627b42ef71f16e9ff9cfbc8908d396110397eccb0596f493fe4dc8d4768c3f3bef790df5246ca7f9a0abb29c83b8cae3390f5237f75bb9d6be2f1d57bfc992f03ec6e3c74fa6577f01b26c8648111533ec701aa8f50089f2e2c2a68a5c30d3af6fce829ec57bb1c221e869f86edc900f32ceda6c8651c8cce9cd8639e9cfe2b8de5b177d4a2403eef1f3d3898", 0xfa}, {&(0x7f0000000280)="2536f5a441b8bada3ce0d7692f5baedefac1854476d2b939492d8d32baff8735ec2776bde22d10137eb8471b4ecfd9100946d150d6624b79bb169baab07a22bacb9cade18b2a4b494c82596e771ddd886de49836f54e689303f135b2615a3922a0447e235b69e53de3d66c8a34f22aa27d606311c13171b720cc58ccb7c08a9563a6cf", 0x83}, {&(0x7f0000000400)="1e310bf3e9424d30b348f30e22813e76839837ccf621b49ac36c208abba39fdc5706cb118c10c773d2d9c7f52b2620b86317029ba48519c877b716989f333b3ce22634732385a0415d2880858c18f3cbe3ab43083c44f00942c439bdc00e736adee871877fde441ca000c1268f9dfcc6fa34518d35a5decfbc965c522de258e3196bc3323d91f8340a4c3b4fdc93278274f80e8fa64273ae6563d31eb62bcd966cb1b5163f1551f9cbd0d6b33ec50f0a67f5cf1af42233175a16c731c783c6637e983e256f87b12db03d216e5ea564d189964bc711b5e94b6b54366721fe3074971681c4ee71faf8800e7ba7692115e7", 0xf0}, {&(0x7f0000000500)="aab4f6825921c0f35d8bd6685c87775994cc4dc6e1c0788b9fe9e5d7272c4e4ec1bde6698a8e82936cd7c200040537dc763671411f1184302103feca44bbc74c5540141c51d0283b541340da3bbeced6ba7ec09e5c10dedecbc696525125e1c2303cb313d9a26d61fe523c7262142390209584adb3fd01bbca9d13553eea79f22cb14819d1cbd391b7490f12b76466174362b0a277febf83ca8fa1ddbd5a0987eafaa1130a8848143cf96ce42f22a22439f74c0580e85b5fbb47a28288b28fd03bb6c6031af9befcc459062b2c973691a87e92bf3143e7fc79a99b2607a16eb87c5d07a21e", 0xe5}], 0x4, &(0x7f0000000100)=[@init={0x18, 0x84, 0x0, {0x478e, 0x9, 0x8, 0x8}}, @prinfo={0x18, 0x84, 0x5, {0x20, 0x5}}], 0x30, 0x80}, 
{&(0x7f0000000340)=@in6={0xa, 0x4e20, 0x8, @local, 0x6}, 0x1c, &(0x7f0000000700)=[{&(0x7f0000000600)="9c8c1438807510237179d0a82f35034713fb662ca2cd1068970928122898cc7a42b1c9ae455e67af829f9db8e0740647f44d889e65d5714bf3de9b2e87358129f17484ad858687718331d5ac728ae4c622957c848091b3f5fa4cef8642491f70201d8c766e3defac45b68a7f3c79bcebab93c8bcdd6ac775cf6601bbc87c79192f145822fecadebfe48f7daa6f9be2713208cda055ed37c8db65ab617ee856484422191fcaba0d617124165019fbe2a31bbbe48c6156cd2661e8c3c64e5c617bc89f3a295c017f3f7265a0f50e563bad6bec2450aa3a22d216ba561e65a3ef76fafd84cbe8792d3d9c", 0xe9}, {&(0x7f0000000380)}], 0x2, &(0x7f0000000740)=[@authinfo={0x18, 0x84, 0x6, {0x15a}}, @dstaddrv6={0x20, 0x84, 0x8, @local}, @authinfo={0x18, 0x84, 0x6, {0x9}}, @init={0x18, 0x84, 0x0, {0xff, 0x7, 0x6, 0xffff}}, @init={0x18, 0x84, 0x0, {0x8, 0xde33, 0x0, 0x4}}, @dstaddrv4={0x18, 0x84, 0x7, @dev={0xac, 0x14, 0x14, 0x30}}], 0x98, 0x10}, {&(0x7f0000000800)=@in={0x2, 0x4e20, @local}, 0x10, &(0x7f0000000d40)=[{&(0x7f0000000840)="c34c73687c4e1741a17d136e935daeb3bfd1f39392dc147348b60254468195b82cd7c325f8836930f5513f90753881ab73910bca9358cb8c13e6e7dcfcb8a68644a5217f7c67d889f80de01fb9501b04a1a6bfb95e827585df940620ddc74f4ceb6b8640e62b9c534a00ecb73220f8f8da6350c89832ccae76ac722e9c740e318c782b2677f1d4603b4b6ac9cb39beda49fa4f9b962160c3f57f291ebd8f6a44753bf44e6a6a23d8d159b9cc6e6a02e0cc6c9ead4928629dfa4fc87a7be3a1edecac73f6d20e2b5d086eaa056a6e7b66260491c605200f5b4e9bd40e123c03f36031df70236eb0aad4b1060f1ffcbeef88c229c3c1f4a6bdba", 0xf9}, {&(0x7f0000000940)="7d1e8041a68f950f5575bd38520ead5976ad83083c5142b2e54437b3cdb5b594e3a3801f2d907a23c89523276fafe2a42157e92dc154e9ecbdc5feabe2beff84efdce7044a114aec3f9a9fbfda9c844db16d049964e0d3a9b5fd90cafd7dbb65a540c29f35775331434348971b5b1887b190bc4a98daf2c88a059cbc71d6775af422cc3dae65e28d41551e78413f69509863e9dd988d34f4a3f00b0375936e6893aaf9a269c6a4be36799393ccc31f7e0bf55745aa209d7336c0b368fbeb79", 0xbf}, {&(0x7f0000000a00)="cc7304e3bdb7a92ccb7e59dedc29d0480a02d6b073c253348af3fc42980706e9aeeecbc6fa9f1c41cd7a4873ec7f49350f1aaeb74f441313e4aba8597dee894d3a5a527f464e0b6431f9c1da346834d4d49b0c1ecfe4096f952f6aaedcf5ce3a64d01da54ebfa390bd5e72c82794792a3e3adc6d72d73c4904bf71d2", 0x7c}, {&(0x7f0000000a80)="63fe68478e6b6c0f4d1388693cf1cfdf35242a6c9464f94e0dc2a7a785b0f91be367c7e6d84850f7cc3f314711eb4fc253f48171290be50e58b6914d1daf5e084748e8215b00eb4bf336910db166ec9376cd40cfba2f75e50ce1325e281b66093eea2e82b90db05b7b79031155b227b9afae9b49288003", 0x77}, {&(0x7f0000000b00)="7227d3e3fc465b4142b03060839ed314fd69e8a2aca47f74a7d755ab55ea76db422bcc0c799dde7a967a02d7eab32794c53f670b6340c5c3f7e91a6290c17d05c2f97291a90eb3798456a205b9af37ebb6056225ec598c2d06c7f02c90776e012b498a55af4cd5c1c1488015a2f334cd9d9d15b346644aa18bd4af6b53dfb2ec0cfa47d73ad17d1d9e6bbbdd9646ecf3d97078e2560a3552862b70522c61b60bcd4719fb6eee5f76caba01a4b96e0c4615b554df26a79feba318288a2d942e800de6fda6cef342732111eb4cd47b3180254030c2d1b5f2740a3d0a47beb14a9f19500d3555c97b7a175119a66f1085ccbccee376cdd40dd84f0e5662c2ae", 0xfe}, {&(0x7f0000000c00)="80ccba57687b4ee4139c5ac44688431acd3f5a958d02a5da1da3f9eb893b9086d4e04397ce71bd230cb9e2edab09ee1c81941ca5dc87bb1224aff67adabe2b0958aecf9f0e3a672cbe8dd8990a5410d18619f09d9e65f824a5f4153c7a62a0e7a27fe6ac68260032ea84a8d5bc42ec5a0a25b325f856d8", 0x77}, 
{&(0x7f0000000c80)="4268d77d28cadf351cba2f13396c51cf4e8894ac2c347fe6c6206fe098cb55ba00f1a92b989c32ed676ca34490a50571bf5e9193afcda87c822f43c2fb83e98ae5ad29043cb0c3914aad610b6f094867e72dbd528df2d50e9120f8317c4f8e84c3881d80f0f2f8773e3935b45abb99a977570cc4d2095541dd148beea6e6acb513f47cd22d73417fdc91b13bd5be2757a65a8eed1d8d896b1b7dab5dfdbf0d1e82681dfc1a90377d9e63dcd5ebc6361d8f44dfbfcd9d373acdd60d", 0xbb}], 0x7, &(0x7f0000000dc0)=[@init={0x18, 0x84, 0x0, {0xb160, 0x7fff, 0x8}}], 0x18, 0x20004010}, {&(0x7f0000000e00)=@in6={0xa, 0x4e21, 0x7fff, @ipv4={'\x00', '\xff\xff', @loopback}, 0x8}, 0x1c, &(0x7f0000002300)=[{&(0x7f0000000e40)="934b98851726179e510473ec539a49a6f5906b436731eb995c7ff30a2287b46010069a6f26a61190d4d3e965ee6d5b74af2895e29dfe58dac0f8d14b5895320136cc92bef857fd2cce0505844403ff2cd9e2f6a1d9c577d8aff7039644b8b4bbc7dfa95f8739ac48629d15d0e80b6c1a09751159616adace486f883b852cebd6d80d17911a2350b0cb7401b960e8506f8201c3e7e2c821a683319cdfede6b1853b880ffa048a2f390fc01e7e0eab5af2fd999d225ac1f94f535551e028d07edc408f7fd81dab6c90965188b403a683aa3f4ec5490a2fbb6bcf33", 0xda}, {&(0x7f0000000f40)="952ff30e3f3bcc41091a623e049ea1e94daf03d6a288239477ffffa3399dbac54d4ef100ef9f1534ea12dcfafcea00d9906a83859c3ab406ac43bc7bd9c085100291c380b3bb759c16018aa897e720c9af604d6a65176bf53f97ae2f3acc0d0ae28397ef12c87c841ce50442e6f80335dc3d08be79b9edc91e0b232424de9df600456704570ac0810fe13107c23070e813073d4f740cdede6e00cdb74335e125a20c5dc4350dad8c4ed146718d002d40f7ec4a676cbde53a96a580f38e7085a2eacdfd9886", 0xc5}, {&(0x7f0000001040)="7413ce088fa0a62855cede438e1602d0121c5032403d6e0d926ee2107d8a8f1ad2ff338407b00d79d147d5e80461882d5f88811b84f76e60b8ba7827127b398a890c4d1c0b087b3d1c3c8982fb5cc855ad25970e9b015dae07d01820d3bc195a121562db792463f1e3dc93d0c0bc3d4c6eb7e8e997ae77dd15c0327d12d786faa4a769a1438fff524162e4b9d575f8d4a7ec8ada646d987508407992732b4e0f5f55cea70fe27663115a37a77e72d24bc94871311fb3cf8bccb7d05456fed014e0bd5c27981031373fa63bf8709a72f16ca93eb1fbcd53f79c731744e9e33285f0fed5a1a0a8244e8117bb865917aaace428923f42d858c273f82216fa2ab53c37c27386f812b588a117a7df8e281739224cc2c60b26e45edd1095beeded9e3b7d5a7eee4810257a7aad775721d7ebfaf5a047a30c43363964a720e300967bac7e0f54f61d3e5756b0273c79ebf95e01ac30907eb9eaf661efa1771291f6605385e734582c1a100808faceb2bef516da546169b82c594d346fbf1d224f98dae9b9ff4d39714bb11d61c0ff81425be2b6b60f779c5d59549481357023f5a1b701ef0a2b8f780656e6250c171b1a96c0c60e5ef80745caa8c3b1025b274793a6bc5da5426e1b208d60f64f3d6c8e0b057d579d3935e71d2134c90327eeb4e53688f2f751d5981b960257e9de7c5b55d9711c9f2d0f6daeb307d21906cd4fd69cc791e96b7938460e35007cb79cd0e7f4d4475ae797659608b5ce091bfeea4ea9271b545484f77b0ff8651a5f09cacbcc0c3b4a12c1232936c886d771ad65ce643acc3c12d5a33a842d25ae0f8ca0e5d01251ff03e267a2e04224aebfc2b75fa5d3147b38201f5f124b30cde4062893c7736fd61974969b28a377eb0a4c3c4e0fe9d988e6706839d21e7b45a72284d8c639723a1bd230e0ab6ad8d9e811fc7ce66bc2032c82a5d89670d00bb76c7fbdf764268618c790d5b55eb63e2debd733f1d2db713a722ca9bb8ca044d31be385df1573468fd91c92fda22029cc4f441405aa25c13e7cc4e7706396adbdfc64acbade757cd1b64330919f43d17c51a8aa6539c7eb463df2e411c46109c0843f2121660f021b8b2c08d2daedd1d00e9ba1fcd491a4817fa897cd485b09028b995c4016d05a0650514a72ac6b7c5e49edfc50bd0b10ebb36a4878b7e0425ecbc5107a48635e7b54adfbfcf72648c509b7562108d4c586fcf51c2b8259cc8d4eebcf99c71fd741434603beef5bc58cd1f4c7cf44ceeec46285bc15a251ef432c232849058a212bf3b49e7337c69c28a7b800844549c631381088375c921453d30b5b7e929c5382e66d9b3a8512481d4963e0117d4c5adf44e1a1debd5984f0ece0ee0e4534442967cb79dd2a4bbd6723b4bd6e9814c17f0125ceb05f6320aa0fab26cbbbb99c5d9cd5197bf1
9907d0b29e1cada9e8a00b4abc9916b7ac5bec96e868707398bff38c089ba7c2c7bc300817e6de7f0c527036abba68c8292a3e5e19a01e5371cad9699dcacb974c83f8b671d0e4320e22309e6be74dca954af9ba9fc76e6d5c392059611e4f4ad1e60fada161d70c32ced7b39bbb31fd73dde596f1ab2a6962bc5deb92fed1f98c74ca779c95f6b41c745b39be56fb230abbcf579dc0d30b8ee0afe78bbb849c7ac29ca8fb876215419cf6ce2d38cc99e6d528c98d8912d6454ca999dbf1ca5c0a52d70b33b02e930dd473a24580e66a53f74f46dd2a53805366cf75e0094555a963d02d3af0d8e7cd1ab592a19e985970f3dfecfc7bf7e0d32f94b0e1d13888394b32a78b92928dbfe61f02756c8a4cfd507350709681d33000679c2071f7cdf5d946c4cad93b4be5d2322035c32747079bd8148df6fb51c59b2c8dc9a95d89f1f67a82731f4a4314b0469e500d2d6244097f5c622cae59afe0d973cffd87919b129ba16404003559a376123d72e3b153916b4fcec2c8e70f4d5904bb6d961ece44eb8037ed7fbb0de43b068360472d038f0063d3dcabc70e22c4ac84310267a06905bc53ee7dde1c7a931a30037365ded88d61b976d0c574bcf1a4ca2a2ff2e2acdedf20ae91e79bcc499eb50774232898a0c2dbcc784e6a4f18baa5036f4130e0c8430dac6e216e17c85431f6ac9644ec90f45583f56209f90d693114d65ef5989b98ec81c140a70468a439192dcb9d8d1e3d0209242ee142cc8c9eaff29c951b9ad6318113cd2e1045493efe2f5052e6c627b3d81f6f900f39fe9912ee73056fbe8dd2fb8f1013459c515b7908c0aa9c0625a93edb04e0724c1cb7ccffc66a15bb1e7810d696fabb2a950ce6b533ec87a8840ad47898f54c2f7eade535aa6e849a8235ac42d4397f8404bf2436c0480ca205215641f186e74ed8d04ef793b0fb457133b80fbaea3ec13094d1fb98b19923a483c25bda173cd3f438804e444bed852ec3908203944c708c60fb27a3d0c742cf8b4b18f4a3f9b8dc9d87000198f30f3d57d18095c3c589d710a1277afd00376504e1b8fc021eff540fb6922658e1958b7cb79937fe00c73ba1fdaebd331e622822ff57078d509d44960375371d749951ee319c320354e4120d841a4693e461770084016733fd8525538354d9b4231300188bc0ed8a46a1db9d316b5e67e45bba09c9dc623fa7f67e229402410bdc4391b73ccc9aeb0075c1884219fc17dd662bb4f9e4069761584a0703b3ab0c334c725d46205d6b276c6442d52d3c454798277930fe086f650c4dad1cae28b6c28f16246b9a56a1cc859ee6feb13c9c3058a89ee212be9ac3b2902e1c6297aa4d3b87a884f7b57d8e4f11671c31a2d6b63d54f18b779151ee512cfaf61af6e7bc477abd0a0625d9dfff3b3ce4b403909856e31a03998fb887720d081209c7e302bc8b5382c0d47695393058831b70ffe6424e014c7d0df3efa49166d1f0906911fd83d7b3e7f3d3d6ef534b0a279ecfbe36741e668007821b0f8a157cc64b265c286f246ea6707efc51b71cff1999c6441768840eedc9e698afd3ef738f61f7027c473bca098367a84c2f97505f9fcf2a1a469bdc6aadddb78d14717bdeae17b2b497adc8c71216ddab964937540591d25aff4b7db6b16ca5250677304832ac04d46a230840124e0cb768c6b6dc58c4bce83854ff707ac8eee83d6be5c51b52584ed1b874780c7ac559be76e34f4f1e53d1465c201dec01ad0cfdf69cc6a02670397201345fb111d5f2105030b662038ac268d1fefc8d0c27b8cf8a347765ffd50a8a9c29bd83ce79b807de04abba6107a6404daf5cd8114a8a72b0a6c8675e851506df84501159647b048d8e75c157ace189ceefa88bec8cb28dda4c0bbf91dffb8e459efb43cc0bf21f2b3df6c34b5dc22c50dc6ba88f7e50616dae26727b5cd094201e45ebc9add451d19e30c37f5a2b9e4793d297d405ece2330d887c13687c0877380f6e02c2c99a988cc172937929b51caf2ffb109c03fe1a9e110829a5d3738a0b6a40f70e80c338e38faf1707bedd34ff6b80b9448ad9fcc9f7e835074ea074219a5b7b3eaf717f84e2ddf9b88403cb9d7b106ed21061c22a8e5c836bc62e8c7d7e89146e114fd224d90ce7bf98b3a3003ccf161e22a02cfe596d2592a0e6d46b549d5992c69ece3b711495577ea86caee85846800ae623419b55089c8f4fc54464da66615cf18ee2489e1e9cda06915715a1ffab9076fba7845c7e9f279e94d515816aecc7ef718f4f9b58e8c31592df2eb3cb3f18fb323504dac608e816ae11b1e23ec8aea9e5881f2658beabd1e19d2c39db6e3f3a23c22e9ad47ddd03522e87a56dc35837ec5aab583b4adc9a6ef73f9189114cdf165fd7e7cebce94e24c21fadf761531ffe3338d74d400e588178d550d9a7e27036c84487b2944ffaa83d83d2290574212a5b68c7124f4eea7cdfcf7813085221ec5e33f98fad5031cb92af92ed1a6959981630202fd
d6a11ab422eeb2e3a74887f1b568510bc29bbbce4128bdbcd86e95609571d2a0ecbf5e645e61d8b42d031839356c36ce428361de5bb631e9f3f81ca7291baa8d9e2eb70e3306687735e538e54826f0f570edb3bf1082676f2bbe0a6b10a87e74008f89d906222a822bb5124e9f44f7163725f3f4f62fd2d8347f700ddef66e16c4935c50be9c22dc83a5e4fadf07dab6dc48a5531af859d5773cdc898838647b506acf3fbb9743120503383f81a3c3736d922396a3a0c9d7c31781c0d23224590e93a2a8f6bae2c48c3c1cd25a6b3331dd42e76c6639ffeadf8b30e678bc3e2dd33968e7ad457d715e8a72eb2d87c2f8e57c5238b4dd13adda2ebadadd0c773b8636ce1fa8dbf2ef01983f8aa1f59c50251f51c168225f9affdad6c0fca6d8ccf9a5dbb2765cbcf65a782b24ef18c2579ae3f1681a1615c3bb450b56570507cf0e7f3d498f52a352e0001ab58cf14266e086f68ee6c45d598c2317767616a2ea70ab2db78c01e459992e8df001f30198727dc64f12ebcb56ec1898d95fd8aa6ef0cdf0e28a8a97d376a6a399c33f80370bc0e7a2ae5502389bac840984c0e1a857455e16bd2ae312dcd6e744cf502256f549a81087d2460f90fc2c2d3947324fb4c6b01671e054cb67863ad21e7e92a2ea6984bdc72aa77daf2a84aec4bfe71deefb5cba2ff3f0d3ee71b753d025f41bfe186c41520421a304cd501da0a13c70b29c6bd621dd7e386bdb94b85959f835c06872f00a88521820f18b730816c9ad35bd26df1675cccdb203475b0977c01dcca429b3ef98ed14486d1de1b1504db73632b1fe5f9b569ae5ca0afe95ffffecc22b286842ae3459f889918dc79fef29080b4b197d99b7f4a21424dcdec71275863c797ee039beccb82e9f1371d493ee47c0d224b0fbaff85a0451de359696ecf2629c4ca8bc649bcb834ddcb56dcd1e65c2cf1ab916cc482dc491dc3085c662861b046ea9249a0ec0e74207fd4dc422d67f12ae32a25471c4a34abb62205d6975a8cbb63644b8993ae78419d87c95674636ca3359ea073b13842563a6af66c356945e77a289c96810bafb4f9c34915476bd4817e8a52626b63d0cd1b16424aa12d6230f76fba5414ffdac66ea7da2f6c9731179bafd4cee443dd3d5de3b58a6f02d298d1916509c58f2f9b0074795a347258bd7095d1a3b9ebb584b6984593333986b66db733920c9a0a4e80b0d3b8613903ad144359d45e22804337353e0143869dca33bcd34a7cf78e52173b7940c1b61eeefcbad300944e3538e3c343c2ec24938ec3ad6bf5b3d656ab8d34d65a55c2fac2baf5c1f54abe648dacdf7c4bd955c41a5df0660587b454b07e2e360371db3d280708d7e233d4ed743fafee63690acce3589fdf9408a6d7c9a840c9f5e37669b4640de282f3fee71474958a391282dd968985c2292d7a79dba248847ce18b4bb1ba2959c964ef80fca9fbe9b86ae2eddcfea2ea4b722d25a3618f3b3d960c2fc94efdbc71cc77adfcae58d208d604e20d8a4526aec18d8dca895c54947a25cd7598109ea4f1e1046c9e5fb5393bba376eddb3b001f08eec8f8eca6051bcf9f6e9d4459aba50c5e58fe31f3430bfe86904569965dbe476dab0c5a4d0125c15f38f319089af6bb06c3ee9943d550b04525de88a8b1787652c504ee79fb726174478887817195254853f0a8d32a37d08487eec9e3a607de4d64006abd10dcb8e065bf4f96fb027c901219df0558f6738722b2ba2f5a2651dc6259288edd3e0772d17f2139e65344dc3bb2312ae981b0d3a333f2a569f3b27dcb26b516771963e748827f649450676973de3feaeaddd91d2354304ca", 0x1000}, {&(0x7f0000002040)="0fffd1853e17205b0cd00724e90796a7d9dcc66665d29823c8932e9986501f45b94a8478ae466c42a817ac2ab8d8e703e3c73580c88afdbf0c489e35a71fb0a195fe30e775fda4103cb5f930161c65c05c08a6d014017e1e29ee598f99aeba2a8b5437be4ea0c7f10004b42b684a1fc1b437937b2f3f142faec42097994e3e9a4c6cb362ef72c864c2c3f3ba2e45023de9e3e9937ad2f0acceb22fbb0b23b35da205efde4593dd81b65cc246729883fd280e7645ac159368593a18c844eca7b51c195d1731174686266fe5fea289f19a55a9e8d54bdaf773", 0xd8}, {&(0x7f0000002140)="5d08523c8af851f0f96d045a16cff5290020a05da0b48992e89c41c65a59d2798efbd0f95a27166c19021f891ab9d93e1ae7fe1ac54d4eafbfe9a722ed3e6570e7b9f1ad6e71811c8f5c494901d72ca0111c7a4e06592de00fb2c1c94ad9319d6f54e65261de8610bafcaa164970c772f7ff0b0f319d3ff91aa0dfdf92ec3b58e37c894a8a64bd97e10eab0eaee877ec1500662567263aac0ac725a5a9fe8a49dada29d8ac8c3d93e80fa964e2a447d5be3c8a304306eacc689b2b2dffd2a076a3bb1ed4ef", 0xc5}, 
{&(0x7f0000002240)="49e437cbe796a2f7c9765be899ec8624240e047c46850df62822560f5daeedb24758092b87f78acaa277af9c13f405ad9b6c3c1c98e30ef971152aef1eeba390878d71f13ceb8e69dc775ba6c1ca03de229deb0da394bc9363619d8534e1bb3464f567cfccb64ecbe83e239afa7cc406c2a96a57fb45e57b33bd4c1be0eadfd79c92c91f4f0531780f4dee3f712a9494cb27775684351757f7b058a12bb9c47f478048003dd30127e7b45a5ce99e", 0xae}], 0x6, &(0x7f0000002380)=[@sndrcv={0x30, 0x84, 0x1, {0x80, 0x5, 0x820f, 0xffffffb4, 0x9, 0xc66c, 0x8, 0x6, r3}}, @sndrcv={0x30, 0x84, 0x1, {0x9, 0x4000, 0x207, 0x9, 0x0, 0x81, 0xa4e, 0x1}}, @authinfo={0x18, 0x84, 0x6, {0x101}}, @dstaddrv6={0x20, 0x84, 0x8, @private1}, @sndrcv={0x30, 0x84, 0x1, {0x4, 0xb87f, 0x8000, 0x81, 0x20, 0x400, 0x80000001, 0xe53b, r6}}, @authinfo={0x18, 0x84, 0x6, {0x6}}, @sndinfo={0x20, 0x84, 0x2, {0x2, 0x0, 0x101, 0x7fffffff}}, @dstaddrv6={0x20, 0x84, 0x8, @mcast1}, @sndrcv={0x30, 0x84, 0x1, {0x0, 0x5, 0x1, 0x85, 0x81, 0x80000001, 0x9, 0xbb}}, @authinfo={0x18, 0x84, 0x6, {0x8}}], 0x168, 0x20000000}], 0x4, 0x2400c004) r7 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r7, 0x0, 0x8000000000004) openat$cgroup_ro(r7, &(0x7f0000000000)='pids.current\x00', 0x0, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r8 = socket$netlink(0x10, 0x3, 0x0) ioctl$sock_kcm_SIOCKCMCLONE(r7, 0x89e2, &(0x7f0000000380)={r0}) sendmsg$NFT_MSG_GETOBJ_RESET(r9, &(0x7f00000027c0)={&(0x7f0000002600)={0x10, 0x0, 0x0, 0x20000}, 0xc, &(0x7f0000002780)={&(0x7f0000002640)={0x60, 0x15, 0xa, 0x101, 0x0, 0x0, {0x2, 0x0, 0x1}, [@NFTA_OBJ_USERDATA={0x1a, 0x8, "ff9a6d06aeca0ecc6d390465fc7d5874e1fe35d40c31"}, @NFTA_OBJ_TABLE={0x9, 0x1, 'syz1\x00'}, @NFTA_OBJ_TABLE={0x9, 0x1, 'syz1\x00'}, @NFTA_OBJ_TABLE={0x9, 0x1, 'syz1\x00'}, @NFTA_OBJ_HANDLE={0xc, 0x6, 0x1, 0x0, 0x2}]}, 0x60}, 0x1, 0x0, 0x0, 0x1}, 0x40) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) r10 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r10, 0x0, 0x8000000000004) syz_genetlink_get_family_id$batadv(&(0x7f00000026c0), r10) sendmsg$nl_route(r8, 0x0, 0x0) [ 2170.107099][ T7124] workqueue: Failed to create a rescuer kthread for wq "bond842": -EINTR [ 2170.239245][ T7139] netlink: 'syz-executor.3': attribute type 1 has an invalid length. 
10:41:46 executing program 4: openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000000)='cgroup.freeze\x00', 0x0, 0x0) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r1 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r2 = openat$cgroup_ro(r1, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r2, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) ioctl$FS_IOC_RESVSP(r2, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r2, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) write$tun(r2, 0x0, 0x0) openat$cgroup_ro(r2, &(0x7f0000000080)='cpuset.effective_mems\x00', 0x0, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 10:41:46 executing program 2: connect$pppl2tp(0xffffffffffffffff, &(0x7f0000000000)=@pppol2tpin6={0x18, 0x1, {0x0, 0xffffffffffffffff, 0x4, 0x1, 0x1, 0x1, {0xa, 0x4e21, 0x420c1ba8, @remote, 0x1ff}}}, 0x32) r0 = socket$nl_route(0x10, 0x3, 0x0) socket(0x1, 0x803, 0x0) (async) r1 = socket(0x1, 0x803, 0x0) getsockname$packet(r1, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r0, &(0x7f0000000280)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000180)=@newlink={0x3c, 0x10, 0x403, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @vlan={{0x9}, {0x4}}}, @IFLA_MASTER={0x8, 0x4, r2}]}, 0x3c}}, 0x0) (async) sendmsg$nl_route(r0, &(0x7f0000000280)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000180)=@newlink={0x3c, 0x10, 0x403, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @vlan={{0x9}, {0x4}}}, @IFLA_MASTER={0x8, 0x4, r2}]}, 0x3c}}, 0x0) r3 = socket$nl_route(0x10, 0x3, 0x0) r4 = socket(0x1, 0x803, 0x0) getsockname$packet(r4, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) (async) getsockname$packet(r4, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r3, &(0x7f0000000280)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000180)=@newlink={0x3c, 0x10, 0x403, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @vlan={{0x9}, {0x4}}}, @IFLA_MASTER={0x8, 0x4, r5}]}, 0x3c}}, 0x0) sendmmsg$inet(0xffffffffffffffff, &(0x7f0000002f00)=[{{&(0x7f0000000080)={0x2, 0x4e22, @private=0xa010102}, 0x10, &(0x7f0000000400)=[{&(0x7f00000000c0)="d8ec7d9fa424195f9c3a64e4bafaeb7e6b2330c8f5eaa3659da77916c696d802fa6635499c23610822ca1704321816975d9f6a7dbd4b2aed1da06582ee349df40e18235f1f400c1432e43889560fdd62ef18091439f03ac13020ca7338de1f1bd491b27ddb80133b725c7e66ad23e7c6a83185754837e5fb4127c0f64315cedc77fb84ad529a2fc2ad42adbe900c58505f50b43b6e7c64b0c46cc5123491d7aa80b9fb7dfea2028e1a7d8ba9666f9ecee2d82770512a0c3dda5b130c4b9eac890186f5e8b4fca3e5fa99aa93986e56f3c36664bd372cda04e450f7414134689709", 0xe1}, 
{&(0x7f00000001c0)="168a8f6e4fbedce58dfe5a22a94590ad3af8885bfeda908668194dcb13e4eb0d635a3cb72f9a4679d8d6e18dc3d96aa98e93c0a47b4b219ef688311f505615d9434375028c38eb81a71759bc510eec02fc115e3a6e", 0x55}, {&(0x7f0000000240)="5a16536a961e7cb033d1240c04a125a66da07d3ff6043e8e4f030d015436bacdc65f886164afdbb2152e3bcc0dc3", 0x2e}, {&(0x7f0000000280)="ceac996a52bad385e6c04aef76525f0ef3507383165d7aeb41f3bc409e", 0x1d}, {&(0x7f00000002c0)="2f83708fd6523d", 0x7}, {&(0x7f0000000300)="60f9ac0bb60fe922361403126d204f3f786a246a2bf8b6c07e087d4ff4e7343c1d13dc0a5db99426dd003c84522da622a0a6487c848562a3955e22fa3cc5cae984358fd6de668740c47a994904b1c1df6de7623a9457a6c6ba0b676c0bef6fb15b59de8070ad586cc0d2e29d0ff5bba124b1b289e0d687df0d1ee9d4c543a56718adca44c37a7520de17b9c7115417ad20ea75463939a175b8404ded5872fcd08c3e516a95fd3094ac9915d018073847cc4d1aeabf29933ecd8be6f264f1f64a3a4924a5d073e981552c74a4e2b26863aa3bb81a74c8b1def4849578d19cb9", 0xdf}], 0x6, &(0x7f0000000480)=[@ip_tos_int={{0x14, 0x0, 0x1, 0x8}}, @ip_retopts={{0x30, 0x0, 0x7, {[@rr={0x7, 0x7, 0xd, [@loopback]}, @ra={0x94, 0x4, 0x1}, @cipso={0x86, 0x13, 0x0, [{0x2, 0xd, "a4249189b335727d168d12"}]}, @end]}}}, @ip_ttl={{0x14, 0x0, 0x2, 0x7}}, @ip_tos_u8={{0x11, 0x0, 0x1, 0x3}}, @ip_retopts={{0x78, 0x0, 0x7, {[@cipso={0x86, 0x16, 0x2, [{0x2, 0x10, "fb2b75fa70af7a9142b880a15bcf"}]}, @ra={0x94, 0x4, 0x1}, @cipso={0x86, 0x2b, 0x0, [{0x6, 0xa, "c6e24e66ebbfd79f"}, {0x7, 0xa, "a4d9ad756f262e50"}, {0x6, 0x8, "fdcbd9e487e7"}, {0x2, 0x5, "ee3901"}, {0x1, 0x4, 'E|'}]}, @rr={0x7, 0x17, 0x2a, [@multicast1, @initdev={0xac, 0x1e, 0x1, 0x0}, @multicast1, @multicast2, @dev={0xac, 0x14, 0x14, 0x1f}]}, @end, @lsrr={0x83, 0xb, 0xed, [@initdev={0xac, 0x1e, 0x1, 0x0}, @rand_addr=0x64010102]}]}}}, @ip_tos_int={{0x14, 0x0, 0x1, 0x2}}], 0x108}}, {{&(0x7f00000005c0)={0x2, 0x4e21, @multicast1}, 0x10, &(0x7f0000000780)=[{&(0x7f0000000600)="20115f08813118b222b5eca2986855205cd28336d49ccb8f21cc8e9fc7ebea2333b2824c8ecf12e41a96c74184768b17e148ff00fcc14e4eb1c22ad02d7999271e33ed5db470b6b03ce7c98e801135455f25cdf051abd115dfe66c009d0e7ccfe30208c746ef1cc4cef1e321d9c5b6558644d1995e4f0df5eca3279bf506c08edbc843d86d25e6ac59c4c3fbcc9fe90f10a7e341417c0629268a1e4c7f955ca1a0c4fc494537221870048e519653ab6bc1f28836c5647396e37d8e", 0xbb}, {&(0x7f00000006c0)="1e9b03f4ab770e2f345b44e67d2f9273935cc4fecb8be550ff1825208e39b8c6b1b545f92fbe77dab8847e3b873b37fb26465b45195418f2c7338efdf6a9714a2393f7553d9690b271b2107c38f2e1b8e59efa717cf406b1c6c44c378604aad1b0216e76bc425f68db981a07bff390be36256c575752c0775068118f3aaaf3ecda36529a6274c8ada043d2463afb6de60d729ba40d081cab415db0dc83208566ff185a669f526876367f27cd7a86c6b43d8d2c8923385ceb13ae4d8126569eef", 0xc0}], 0x2, &(0x7f00000007c0)=[@ip_tos_int={{0x14, 0x0, 0x1, 0x7}}, @ip_pktinfo={{0x1c, 0x0, 0x8, {r2, @local, @broadcast}}}, @ip_tos_u8={{0x11, 0x0, 0x1, 0xfb}}, @ip_pktinfo={{0x1c, 0x0, 0x8, {0x0, @dev={0xac, 0x14, 0x14, 0x43}, @multicast1}}}, @ip_ttl={{0x14, 0x0, 0x2, 0x9}}, @ip_tos_u8={{0x11, 0x0, 0x1, 0xf8}}, @ip_tos_int={{0x14, 0x0, 0x1, 0x20}}], 0xb8}}, {{0x0, 0x0, &(0x7f0000000980)=[{&(0x7f0000000880)="0d5e5353604f9ee83da8cae2c35ac231465cbfcce8b09033b0ba696423d2d401332d756731ab99d1bab1bbcb9e4652060e0b935b0770f518841bbbe671c8ead596d4d8cf665e85f466cbd5c847cc686b6c77d0466407bbe23e5ac340899902a2606357df396663a81d4644ca79f64286fe9cf772a257e28857c01eddc37ee99d85d84c5b739cd082cef688bf0258f8210a84a18daa6c8649600e322fc52eaf16bfc4102c2e027591bf8962b7aea27ea2c9376c9716901302b94d34c62d08c68c603481e43bf85354e7bfebaddc1edda220ee70", 0xd3}], 0x1, &(0x7f00000009c0)=[@ip_tos_u8={{0x11, 
0x0, 0x1, 0x95}}], 0x18}}, {{&(0x7f0000000a00)={0x2, 0x4e23, @empty}, 0x10, &(0x7f0000000c00)=[{&(0x7f0000000a40)="ff312cbe2acd447fa1", 0x9}, {&(0x7f0000000a80)="90550e63f1968150709fce1196a06a85985b9f14ad476948c6a19b762992e9b0e9f64e3cca9efa1184437159cbec69c4c6e9e7d82dacf105477e092b1113cd0d8253a7f671a7b7fbcb2bbfca9d116299df6a686ee8ba3da33afa8c826afea75405cd861d383a3bcf49fac5b96e23c559c3062d13223dbe85993ae4a527d953beb478f62d468cc67d4c96909696b8e01403d20b5dd8b872c1d23dbd0ddbe4c7d0c8b81ed79ebadd33761c07acda446bc4f6818f55d469137fd519a055c4f87608bb6a3400ddf21bec95cb4094d5afe742b9877b187f", 0xd5}, {&(0x7f0000000b80)="cacdd15be640ccd6421057b3229585a34a0ebbd61744b6e959528dd9d11bc4a78e38d4baa80937c46f1af2882a70e457dae8657d90d477aaf4c4a26e3659563d59abb90b4aeda990481403222eb0", 0x4e}], 0x3, &(0x7f0000000c40)=[@ip_tos_int={{0x14}}, @ip_retopts={{0x58, 0x0, 0x7, {[@generic={0x82, 0x11, "08895e5ed0db3aa0a197f8af0ec372"}, @timestamp_prespec={0x44, 0x4, 0xea, 0x3, 0xd}, @timestamp={0x44, 0x10, 0x63, 0x0, 0x7, [0x80, 0x70000000, 0x10001]}, @generic={0x89, 0x9, "a1d47fc3272900"}, @timestamp={0x44, 0x10, 0xdb, 0x0, 0x1, [0x81, 0x7f, 0x2]}, @timestamp={0x44, 0x8, 0x74, 0x0, 0x6, [0x3]}]}}}, @ip_pktinfo={{0x1c, 0x0, 0x8, {r5, @loopback, @initdev={0xac, 0x1e, 0x0, 0x0}}}}], 0x90}}, {{&(0x7f0000000d00)={0x2, 0x4e24, @loopback}, 0x10, &(0x7f0000001100)=[{&(0x7f0000000d40)="155b0e293cdf53a13b92793b576ad536a1e87d3966ad7f123e8b6559ecf771c91d1aa1b1f681d0dd1b9b519d501fa654eb251916edb1bc2cd59a947a5126b90f1a8b7904b2ce2594ba33241ebe7f6d4d404926b43d18c2711c1cc41371edceec88a1ceb4d26101b4075e6c859620027f0c5ebed6b42aa367b18cbcdff4148bf4681559671b1309f5313226bee337b6e5c12b5cdc7d62d177b5699850e78d31e4a3be427e30dbbf3444a119a5ead25e7f6e0f4dc4f5e36a6c949be1a370eee7aef4c010af65888ed89f9694ad819bcf72c3067c00dcf0f8d1", 0xd8}, {&(0x7f0000000e40)="a41204ebf22a77d7fd6749a917b13847b269240dbadef590486cd1dc55341dfa8fdac96337ef98eb32792649cea4b8261d6905a91b67cf4af258e670", 0x3c}, {&(0x7f0000000e80)="52c842290eef259d1d9abaf29efd7791f5ffe8a118a5c9127c8ecec98cd512567717b7e3735d2453d939146977c5eef5a0a0e79d3c700e78691a8be1a53b6ccef44036a9ccb3573cdc6d4a449d7aec4b881dd09ddcb1ef59d38057ffedf9034108713f4fec00a71a4fd51bb34b56906ae5389aff", 0x74}, {&(0x7f0000000f00)="3470a5466304318982f8f58a55f18663290a241d4faa4df6865c2820a47183ebb0695271d8f502b502dbd74ee52338426c4d8b40e47881c79aee9089b4f76024f48653e5fcd68e729c10b2c0a79aa4ffa9b1f990a274c7e3ee3233f43a62b4950fc52edc0ca963eaf99513afd8aa3edaecce197794dc5bb0f02f05350e27b5d7c80e89712ac5fef005f2e7dbc734ece0703f52a69bf4c648533d8f5223879a043214d83b0b7058d567e94dc6acce26049677a0883ed8edf933584956d00415c412af6c2c7f44ef6dc566ea23be7651cb1ca2a5fade82e0cde50a4caa0bdf6e9bd476431aca56dc0551a474154167facb", 0xf0}, {&(0x7f0000001000)="0128dd099555df5990e6a3d826c1313c87d7df20f125e4365e540f1510693923736bf8be64ada1b775f7a332cf89de483ddfc1c76abb99ce15ee91f924b9643bb4738dfc60218871f750642f96004202b5e34c290d837ae3a514a85f757e5ba167eef208c1985af4e9768d5ccf5c852f5b621a29317b3e1ac1261e0672d88ef5099c40ed04039e7fe60e57184e28b1c4fde20070f791949ff31dbc1b75e87874120002659585b27382ab3af7f35d701b429e6c689b85f74f933e15b212db2f7b21e42321901d1a5371d4", 0xca}], 0x5, &(0x7f0000001180)=[@ip_ttl={{0x14, 0x0, 0x2, 0xca0}}, @ip_retopts={{0x68, 0x0, 0x7, {[@lsrr={0x83, 0xb, 0xea, [@remote, @broadcast]}, @ssrr={0x89, 0x7, 0x79, [@empty]}, @timestamp_prespec={0x44, 0x44, 0x33, 0x3, 0xa, [{@loopback, 0x5}, {@loopback, 0x9ac4}, {@empty, 0x6}, {@rand_addr=0x64010100, 0x4}, {@broadcast, 0x8}, {@rand_addr=0x64010102, 0x6}, {@empty, 0x8}, {@multicast2, 
0x170000}]}]}}}, @ip_pktinfo={{0x1c, 0x0, 0x8, {0x0, @empty, @initdev={0xac, 0x1e, 0x0, 0x0}}}}, @ip_pktinfo={{0x1c, 0x0, 0x8, {0x0, @multicast1, @multicast1}}}, @ip_ttl={{0x14, 0x0, 0x2, 0x4}}], 0xd8}}, {{&(0x7f0000001280)={0x2, 0x4e21, @remote}, 0x10, &(0x7f00000015c0)=[{&(0x7f00000012c0)="9894e623a1056c207b8ab98ee54adafdf085da81a37f06af11af8ea35ebe32a909b6dc2a00cefe9ef6f0e0dbec64094699849cb3f1a87563c70be6221d53ea7de110fc3b5d71e98e7401a537731b1d4f39a0a55601beb7664f56cdb8ec89f6f3abc1d7090c87cb480e1a2531ca2b1b134099eaae57774ec144fe3af8f841396fe0bbbc3ab65257733eee9b7bbe9623b9bb4e6068b59580329892", 0x9a}, {&(0x7f0000001380)='o', 0x1}, {&(0x7f00000013c0)="372092a93ca6119d3657481739e554ecc784e9a8166d7cf723b856c367401a08dbdb6b27c4eaee0a937c7be88b4aed903a301f7fa386f9744a97a598e340b047cee5ef12f3e2b760ffb767eeacd3ec6b115dc33be519ab02e61ec224ae64bc77cd7b235d3596222c0179291b68f2b5ab02cd09970dfd2a85292c7103dded0dcc7a6322be8240cd19cc495cb94c502ccec31b18df745bfbff187851d33dc939dd9ac69f69d5f49f714b85c9c832fb4af3606b9da34c63f14f0d5c9a20eaa8791f9526d4b263d8e098ca94e77a12a8fc668450cd13a8c8b94d11b8a517d8823462694baeab3978bfb8d692daec16fe55e4c833314db655bd08c3a10efe69d11a", 0xff}, {&(0x7f00000014c0)="7cab5402e8cd1d8ae1433f85af65db679bb38e60c7a2ba50aabaec9f927081a7685b0daa7b87ebe6ec687d6f33c8b92003fe9c12d3979f764b6bb565290ea7492aabeaf2c64e94f9e31b51de46bc003d1625cbbbb0255c50447d1b9304f9b4313557cae3090b0f52b4fe28f22ce877c8c0c85cd26438bdc745fdb62d0f432587920eb0b462b7de1704d24688e87c78718cae99ddeee00ba5b4c9abecd17fc7fed85380e1ec3a69ab4112b78ed683c37024556b10d03463089aad68cd94de", 0xbe}, {&(0x7f0000001580)="a30f6cde7c099b41e4fbe798fe", 0xd}], 0x5}}, {{&(0x7f0000001640)={0x2, 0x4e23, @multicast1}, 0x10, &(0x7f00000017c0)=[{&(0x7f0000001d80)="b242ae519b1247bfaa2deda50a201e1cb8fe1c3c57219d40cb9ef9b02dd694e065e10491a23645cabfaefa14291cb90abb516fbf3a62bb523f9ad6766b393231713de473237fd66ee1c8eb1bd6d600e02c5b30fedfa9cfef79c1e4df2da2ec50ee85f3d656561050c2321b34e28e6f21776f0ffec02b1d8b58e044fa5318a5de7edc92e652cbdd58e67da5677d637080cc7d9ed94cc446da2cc365161c3f43fb07c4e3ef85b34bddf8b439b0d048a3be4a7445073594aa29ac4e84afe712f6cfb66167847f91503ded977293609c9758e2dc8a3abd6c2be3d2582bcfef777406a3fee7ce59eb8d3cd2ad8c23039f1e7897ace41b1ba13af107a87a9e1ed108b313621bd44c94872cb6da822740f6d98b75cd5a8dc5f60e46d7237fc0e2d459fcb77f9e86124bc10e8b5068c1e229208841d17fa7dd052894d71c481bfed296d9a0e12622231342531dd3825089133598b5c20af81cec3823809a6d03f3580b6249770adb38586987c50c9bd0f52cf8d11822b7418dab1319146441199eb3a91b3a36ebba53c313eaa8e3c28839d9c80bb9e5784674b2927f1337275f541cf5e10273e5f9e7f63fcdc512b0ce938b720cb428c7e18c9887e37c6effa4653671b265939ff0f2285bf8bda5329f01c658b3d0f3778d4f3a4275993edbf0aff4a1910d1311e104a07636f3250d2af8e2a35d7d6cbfaf2b2c4fc5fa99f28ada31af629e1f3ef2cccf03f1cf1eac4ce4a7b9842b84eeca6e298367385b8e4bd22dc7595274a5ae0ea6e248af8264917771aaddc027a3f3f1231cf57193eb1529b0beff62c5de22b749e4a49e725d2c17e66a95d2b7aa7adb35fdd3798bc729fd46c6e8db6313a50a63abe2516d3f78241275c36166b461fdd5983426bebdc9a7890d5db21db45977d375d3c70f796b3657be646da9f5261c0e1552857f592dc26c0a39489e599bc64b217447ff52fe3654fc213d3272c5a3b6e5b98eaaf2de717fdc6191a7fe1d379ce8491598a3e233d457e82174925f1b0fb3b3d9029f85b7a6a4d68f63daca582d9329f33fa558531f756d0fabed5da64a7aa2fa593b29693bddab21605b4563770a48793349c3d27deffad6e327748776f8af4054ce945fdb7770e693086d5bbf6771feabefeee86a85aa36ddae621b3a8ab4a0db525308ec774edba7f6fdde7f5fefa3feb7efb5472856ab46e1fac3f13bec4d19117cfe1f997df7ee8cf60f6262b4adfcb30720d7d13cb635f1d89b18aa8206d02d214c7d0e2c42ffecc6adbee323
858e7f9ff3c8114e2f31c4925f9f684cef102a783dcc305e4594cc183a5f61322607cfe341f705bffb52967588fe6ea10eb502465641b74598470eaa1d0c3eabc62a7ae70a6950f48449e88cc94bb5fcad57b14195ddf0921e0d2fe5232ef95b969b8bd95f80b25ebc07aa4f7debaca6601a9ffa74490177e1f5a5d554e51740b5ab27ceb5dc4791d3e942e7ddea5a87b679d7aff9432f558d75d3199ce26fb65573f3cf5bb88e48645d2158a89d3501afd99b9ee289fc93be48beb20391717ddaae064a36ac49f5ba9b5721292e4459899688912065aac776ea38adc9c7e30b0bfa66f523b7a4c502e6b9fa1f93ca9a103b488534e83124b05be4007e7e615a056a62a593f65bce54e5f3dee9cab355b770a4a94fb2c774130e547cdc444406be4d3c415ea8610315f00fa84e3832dabeed31cd2a07e0e0230ff7a1d833e1c00e4d2d2658cc0519ab4ca90b80b640646983f4217117c7031138a360603e1e7c0c27c2e89954415e3c14334bf338817edbf0c84d4669b9fafc15a3709a9e5ff4a5b319d54e716a3eaa53ba488b3a9fa6eaa789984d913dd939d2067a84c634701f595b0a84512c51ca1fd5ee284f9648b9f5f8ab76008f1eab3e2779853bc628405532efbe57f70b3faca665fc7d336cd615d86f8613586b5616e0ba993477bd8dd3595b0f105f80aa9d5ec024c177245fd98fc5d19a288a590b5414b98604c845ff2930f0ba9267015ebeff3b0933023557be6b910a4436754ae3d25d49ba56546f5f59f1780873baf508186d1731911fcfc93b61f72659f2a2e61e0bb0ecdfd671f451a5fc43961614e793ba82b5e7903c6f79a466f84719981357c964d9c5f2e923e7e98266f634d6ca0200c88b0d8d369fffb4cab287f3082d6eaa546ba886abd40a444b28b54c928a2b6f093f6da9a7f96f1706b60386b9fed854cae565d0648e93019e86c122225fd0c539c3e2c7d19bf0e775e34b5ef905f81d21ab2ce444cf582b285e4b26568da46432e12743df4fa738bfcfd800e481fa4a13bee917fee38d4bcda52c5498febea5f3bb2d366482529f3e65edf67eb6fba29478cc9752a9ea7e5fe28ce87a63fcb2ee87b4a2e19d0cbaec4d51afc010c191ac870db03ca2f418dbffffd2cc6c0c01a2a343ecdd0080bf9b3f7f5df3a3d1df51223c8c86c5edf206bc283fab9c06b1d54bfc1a1ef72939d720f3f2fa1566121bdcbb52f2e10270ff344d2d0c77fd227dba913596927299b0f779048853a64744cf4a5aca78fda716cec87802b07ca6dd2f2a95b779fe11252b4bc54433635f51c94ebbd6cd513c1d1f62502fc57f8e942e4fe8c58600abc9a198fac583bf0ee3b6cf0989b9344429d41f5f9245f8319fa0861dad9115c1e5225a61c9c72843c42687db5cb59e18a3fe792bbb2d0d35eabaa890a097311f89a7deff5b128284d964db87916f07a4cb90300ccb6dde4b38dc31c290fe20eb0d7e8eca10f3e9387916c809d4d05c63b1f88dfd55250c3fc5195e2288ac280870465515b3870541785f301646390f901e1cea1cbcb3c793567581161b4c97871b90153493c0568db59d90fc444fe8e3cc3f2251b1fa5070451d1212105e730db436aa9710dfd547dcd40f0095f083eba224b16e0ca0c1be78d6ef1a2405d9f4cd6fc06b5e20bb2d93bc069b8a081efa54bb4160acd8eee514395a8a2b4f72b8356538fa63868ee3685920380bac708211bfd66fc5c883889af14305a8896efef7b1c7278c00ef4d9c472632f2b7d660e99b6fedebf536328231cf2ed4cbcda4cfa784332a9481d71b4f8b0d1f3499d0a64aca015e2bd7bc47bc2bac2382577d8716ee2912c48a00f765c17591e82a484959a4e558b5581b1aa159a7597b091b3389f8727f507020feae305c0616f8f6ad9d9aee13712cf9fda696959999d84ef50b1268d3b7565b0c220cd4bf06caf76aa7799097907344156a407199500ad4311deee361728bfcf753ce4fb404764e0cb40877041de077ea1fc1ec0115af1e2b2181a511de8012e0b56375df11f8e0b1f6ae6c0765d2907890d72493b342f61b47ad3c7513ea09c2d08b4d6c2ebf71ed8292457161a80c9efa8fb7ca7e4621798bc35fc26c10c857e44c0d7ea63084cdc029c9d809a7cd7dc6ddddabf0c8fb8c3560b3214e5488fcd1eca5fc2608eac431f7b3f8a92316056a3850941fe062713e4590841e3abb7216c5b56658c78151a776806e0bf52aa07d826e07a6a90f6ba4ba6eac81f81c41a17cb55cf894ad19adb5b39f8c865adaa73fd1f33735819d8c9cf50096f5065dd82e2805a03da4de2518ded5b1038e383de49cd2e0f1d875f972898381c8ec5bdd731442f5b4bbcdbe1b44086eb25015557557fbdd0de79562c9bbff095f59d71b35082610a5ce3a153e3e59d972d68862cb7ec177991371c72a422c5df2882e098c1b747b960e353e995e36c6832f27d5e29babdf3b74a79719a32447a885e1adfb25a74d6ae8452bfad0ff832a889f47fe711
3589bfef48d2b88ea94363a8a836267e6c9eb1db6cd950d8c0f3469c1565b18f56ede3d4ffd2c5b3a878d8b7026a243fc04a2f12148ad0a5807a7d4adb61c6e3ed457bf3219cf03815e589171c6b07426dd0692abdb91a663e3b37b4e11d05181299d63549ee0e0472163a1aa65b45f058d07dccc688dfb20d8523dc2898451ab5c1dc5bbcc4f2aa260b2faf3fab37685fc293b3119deca3edc9ce28eb584cc904b11e745c5319a95798fb0ff46b27c6ef3d3ba843050dd2d2c025cff6db1cea1fe5f9bd94c7e11f6af357ef994368b848b6e03baea57c2d2a0a207f26e7a256c7e50c82b2421fbd1c2c27b82f62497a0212e4828af0f1dc104be93a4a39afc3a576c950ac6d10bae80950b3d02bcde820b384df4a3fa33edbc25aeca61a7f61b28c00764640a0b217619f7394da3c746b5e0ccbfe2429ebce3932b8600de6dc243251c7de5a7707669d0ffb9fac49a146f5924805ce2a70c282bd5584fa45b65eed893c80c8fb3f4b86baf4c3bb7cd788a1461944c51d8896a38690be2b6497fe5ef791a11f8121a5b94c554883eeaa972de6e6c314dbe35978b52787668ba1c2e0c94dfec878b4c37f8dd840b6be1c425f92b94fb79d1261bd36a573f17a6b3a474714720255174233360342d96b66fdea23dc1f22a54c3f93e6ebcde31096b7664e3fc2c74db439b8dfee46583bd1b203a1d8746682a6a8413d8d0f84e2075ad08c4aaf26b0f27205d95aedaabc00de025a6ab48052be521561e6d09807e551b1ea9141fe52791df4553b7567d1c72b236f545fec240d1a5a641cd1ce94ea7812a8764d98566e5656875d8822586bb2c2c6a4877b5a7f3a1f16f46b86bd7f2b06e4db1abe4cd3c4263b559bbb68a25510b8f605d191bc229212d06a753ac3b57c7635ec0a88c5f17f6a98fae26b24d21d710c1c81c511ac12f427c70f77dbf003210873b35ce1c8e29fc1928eb8e9823c1b7707192ea6e00b247ef545ced0b62884ec974dbc1f28abd7ed00c9e1d33bb8041d9d51c4629c760f6e9b87f41138ca45e0a111dcdb575f331686f54b60694b295fcdaf7b3b8df7e1bb3693214092d0780e3b8e0fea3f935131f8274bc1348688df4fbcf983b2a7c5d035fa94cbe6d7cfff32a59fbda1a9f4fb3a494050ac41fd9946ce1b8c733e737c2327ce99ac27a621967f9940394202937f1b48970d6ca533673f7476aa3f80c18737e60dd27c136311983642789ac2f710397eee32bfd63f0abdfd18a61f12e1701b2d2b3e55870f36b683b91dc205b7aa96f99621b238745a05e646ff3427da96e1ce8f6733e54f6683893baeac0e83c410b0ced1824b26c6ae341f74864824bbe83507d70b689c3ff2486a3dab0ff82a621d5771fc31d9b1b4d652c2bf3c86086561e0322359a53d2e736a1bd8cb47e28a328b4f1fb108f57cf536440c86ba7d12a71e512299281846543eab0bd02261f4bf73902589c7b5c89071a77638c523728286088bab0ad194c44bb6e8bb08a9915182b61aa3ef11e7e1cf418895b2ee5adff7c6ea71b05084824535432cc9d45ef980ad5e001a882916318166df12185122937799a7545a1821e67c838a6e7c3338c4103cd6d7f364468867da71a38f181ed0a01d41282d6d270b1cfb5a885941d2484e9da9d6fb6ab39e0381ded7863f8161b8ca38760cb0643c9d7691bffcff4d71522dd86cbac102c34ab5907a1a818febc5c65d65e51381f845afebd3d0249a8a1890017e7dd1ae7b0c508b6a7d5525d2c3f8321dd43a7ad7c23d483778739a33fda33f7636f2301f7d100092576a66229e701834cc426ece28d3de1939360c294a6ca9903146fd4449df8ac56830c5759c1512211571d7c91d7f53499f1545ce4e0c99385c960750b1b6474690adb28a5cfbfb54f0feb3492e74af1cb829cb3942e04a4f6fdbb0004d170e535d059f8c51255c3ba8981355d4d2a8c7396412572a0068e701539be49e4253b06900ff2f9bd02c197d831", 0x1000}, {&(0x7f0000001680)="cd5abeccf78bb129da687e638e9aa4980777db027eb8e6ff4bf405684aafce016154df2ded", 0x25}, {&(0x7f00000016c0)="e0d9fb571271e7c2e4818e7e2fd1fed7590fa04e1137b5e1dbca0a3896fb29b9bf55813d22c381758916beefd5fc8dfafaf3824f19acc3d0218260166fb57950c6c9c23ca817a1bfa07df1936f2f177e2e29609b02ddecb8ba3c4a3082937b4d996eee30061d35460f899f17e3dd334b308d98289a12aff49c7a53785984761ddeec3d5ebd49a785a056baef0f577e1a7a1c656d00023397d0e39d0d32901aa1e04f0d1e6250755a53f2c2f5f381c6bdc085285c40a5b1c2d4970581c893fb1eaed11f43909badc1da7cc39cbfa1ad2c50b4a8bc59f52cd494b0d51ca93b53a4fcf4df16f5", 0xe5}], 0x3}}, {{&(0x7f0000001800)={0x2, 0x4e20, @dev={0xac, 0x14, 0x14, 0x18}}, 0x10, 
&(0x7f0000001cc0)=[{&(0x7f0000001840)="9a898dc601f47696cc62c63bf283daf15725e2875b257b7e767a3a2068cc45c20b71d4cc44d845e5b797b55dfb14bc8b25a0fdfecb412c54add392361e0eabdc1459e668976aa92e89f9e4e0be99e6592d8a421cd82694c98072ef0adba04ce56a8552f405fad670c7378945bf5be5632e3ef0e023f7bf27508a7833044d8ce8bf85673362176bb3104bd7b7aa434fa5d585c3fa3fbc2fe50b396c7c0817ddf3934f31b211de8b896ef7d333a1f55943705b771b252d14330b44ddb23ce78522b888289056c49a973185", 0xca}, {&(0x7f0000001940)="2b1a12d822554c41bf293dc02172a3f5746d18f8a48277c0d67e083264109149febab0454e0a135a4b03cb4a870d83fd4b7edb1f61a1fdfd3bdd2cc78db45ebc9a7516c3609be38abb7df7cc2ab0f0c766191b17c9c6e815cb3ab53cbf5e5f5b6f5c54b4052fd78ddcb177053abc82", 0x6f}, {&(0x7f00000019c0)="b316d6cad8674aa5baee516b9aaef9102a94fa59325c9cd3f4dd57fb45359dc7d4c21a7022f6d37ea73b6734d38c6a1e7cb7685cab04a548e7a77d186410c9937b5f58f7af36a535e2ead100014ac33d61e54d7f6227e567845fd41bf83a0b5abe5b4fd68ac301f9d7fcacf90df9faaa4d22aacd60db71798e665bb08d70eb26dbd7eba9108a135d1b51408bed3cdcfe6900ebd3581eefed35aae079e023c2a373964f3f34387e1ee68667a612f9af647d6f77befd9326437e0a66fce174e60ced405ff0eb63676a661bbfe16075c041725de61e628326981ed2785088f5c11b", 0xe0}, {&(0x7f0000001ac0)="7f4ab5a9ff0d9379eb8a6d8b02d9bdc4f6169dbd8794aa6994f310318b068f08aa2bcc6df2a69c3e463d7ef726ad27102f0fb8d8112e9cdd27d03e6bc4abb322c15a26d3339f57a492edc80f8f1ef9a5a1cb477152f42275879ca0d12c2aed8c35a084498bff7e5cda1478cc55b91c5c57c3f162fae3b57c081e10f59f51bbcbb517ccd113f07e43b4d992216487499d6baeb058cb7949ebb7d4ad46e015d2749fd25bb2e241c6f0a06fe8554e551ac7cbcd084a3aada5ca73903bc8c5bd", 0xbe}, {&(0x7f0000001b80)="367089a2fd8081982423736879f95a497ff068632920f3e3af2ad7a2f7cca1", 0x1f}, {&(0x7f0000001bc0)="1fb0b276de9a9dd6ce297c49b87b1518779b75abf9721e48fae3f9fb857675261dbdc9a31522bfb310d3f35fff0ac009dee6bf2f051ce1ffdba3fb7d403cb7ec1bc6baf8bbfbddf1cbe2d0549a1c0476cf393d2eaac14488fdd3460ec80d2d4b9505d8bc503ce36ab482870b4e054b16a3cbffca07945a53e22ba6e8bf8eb69782278eb480d5e30bfc26e12f1d64c27d35c0903a3adf92ce59806c8fe734be0d961ce8342117b618201f344c14fd12b49fc5f1487989ea7759f527a2babf61812e0006e17dbe433eccc3987e2f20a378a39989bfde55b0397cd1ad", 0xdb}], 0x6, &(0x7f0000002d80)=[@ip_tos_u8={{0x11, 0x0, 0x1, 0x1}}], 0x18}}, {{0x0, 0x0, &(0x7f0000002ec0)=[{&(0x7f0000002dc0)="0b6eca461bd70e01d19b7ad22f7e32c3c8cfe40d80ea3f5fd61900dda9d525c5a29a02054155ae160d266b0a88fca90067f2c805287592edb537e7affa4663e4e5cfde9464135268aa1cf5468257846a429f33433e28c56d27d5181691635812723b3dd0de2f99d020f59c6fc3feb8232f45857f6be13655fd0f0b3605ca6f312c2d2a9153e3b3d7510cd932a585f12c468b98bbf515b15e79b69202e556bbe96bdfd62b741f16d2f848", 0xaa}, {&(0x7f0000002e80)="9702b6e9d94830306fdb5dce81785e8af507a77cbf", 0x15}], 0x2}}], 0x9, 0x44844) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) r6 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r6, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2170.338941][ T7139] 8021q: adding VLAN 0 to HW filter on device bond1432 [ 2170.373044][ T7185] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further 10:41:46 executing program 4: openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000000)='cgroup.freeze\x00', 0x0, 0x0) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) r1 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r2 = openat$cgroup_ro(r1, &(0x7f0000000040)='freezer.state\x00', 
0x275a, 0x0) ioctl$FS_IOC_RESVSP(r2, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) (async) ioctl$FS_IOC_RESVSP(r2, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) (async) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r2, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) (async) write$tun(r2, 0x0, 0x0) (async) openat$cgroup_ro(r2, &(0x7f0000000080)='cpuset.effective_mems\x00', 0x0, 0x0) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2170.491064][ T7149] bond1432: (slave bridge1332): making interface the new active one [ 2170.594451][ T7149] bond1432: (slave bridge1332): Enslaving as an active interface with an up link [ 2170.620285][ T7202] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further 10:41:46 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xee020000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2170.701264][ T7180] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
[ 2170.775462][ T7180] 8021q: adding VLAN 0 to HW filter on device bond1397 10:41:46 executing program 4: openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000000)='cgroup.freeze\x00', 0x0, 0x0) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r1 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r2 = openat$cgroup_ro(r1, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r2, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) ioctl$FS_IOC_RESVSP(r2, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r2, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) write$tun(r2, 0x0, 0x0) openat$cgroup_ro(r2, &(0x7f0000000080)='cpuset.effective_mems\x00', 0x0, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000000)='cgroup.freeze\x00', 0x0, 0x0) (async) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) (async) openat$cgroup_ro(r1, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) (async) ioctl$FS_IOC_RESVSP(r2, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) (async) ioctl$FS_IOC_RESVSP(r2, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) (async) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r2, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) (async) write$tun(r2, 0x0, 0x0) (async) openat$cgroup_ro(r2, &(0x7f0000000080)='cpuset.effective_mems\x00', 0x0, 0x0) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) [ 2170.845003][ T7181] bond1397: (slave bridge1262): making interface the new active one [ 2170.895598][ T7181] bond1397: (slave bridge1262): Enslaving as an active interface with an up link [ 2170.899508][ T7211] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further 10:41:47 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, 
&(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xc3ffffff}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:41:47 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000040)='memory.numa_stat\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2170.960318][ T7197] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 10:41:47 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(0xffffffffffffffff, &(0x7f0000000080)=ANY=[@ANYBLOB="2321202e2f66696c6530202720200adb941c5a01c5b98440db3f5d2ff89a0ae2b2e8809e2556012f990209ae08a74d71fe42ccbe13ed4f35a8b37b722a569137ea59d8fc21a81923b278772c3961256c98123ef9b55b48e4c5909bc583ca86c986387eb5489837f066bb83d6034e2a91f1d6ec4b5836c9b5fc317150b7a67d76aff9a6ace29e8ade056a79987852efc5b0dfc83bed611d3fbfdbcc0338287056735b013bf0d2be3d48083e46c084c224a01a14167216c03f71625e65319a97939be26e034bffcc8a8f"], 0xd1) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f00000001c0)) r1 = socket$inet(0x2, 0x4, 0x6) getsockopt$ARPT_SO_GET_INFO(r1, 0x0, 0x60, &(0x7f00000002c0)={'filter\x00', 0x0, [0x9, 0xe4d9]}, &(0x7f0000000340)=0x44) r2 = accept$phonet_pipe(r0, &(0x7f0000000000), &(0x7f0000000180)=0x10) mmap(&(0x7f0000ffd000/0x1000)=nil, 0x1000, 0x2, 0x4000010, r2, 0x2d8ba000) pipe(&(0x7f0000000140)={0xffffffffffffffff}) close(r3) accept4$phonet_pipe(r3, 0x0, &(0x7f0000000380), 0x800) r4 = socket$inet_sctp(0x2, 0x1, 0x84) getsockopt$inet_sctp_SCTP_MAX_BURST(r4, 0x84, 0xd, &(0x7f0000000000)=@assoc_value, &(0x7f00000000c0)=0x8) ioctl$FS_IOC_FSGETXATTR(r4, 0x801c581f, &(0x7f0000000240)={0x2, 0x3, 0x7, 0xffff7d8f, 0xead}) setsockopt$MRT6_ASSERT(r0, 0x29, 0xcf, &(0x7f0000000280)=0x1, 0x4) [ 2171.057638][ T7197] 8021q: adding VLAN 0 to HW filter on device bond842 10:41:47 executing program 2: openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000040)='memory.numa_stat\x00', 0x275a, 0x0) (async) r0 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000040)='memory.numa_stat\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2171.202035][ T7200] bond842: (slave bridge999): making interface the new active one 10:41:47 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) r1 = socket$inet6_sctp(0xa, 0x5, 0x84) getsockopt$inet_sctp6_SCTP_SOCKOPT_CONNECTX3(r1, 0x84, 0x6f, &(0x7f0000000000)={0x0, 0x1c, &(0x7f0000000080)=[@in6={0xa, 0x0, 0x0, @rand_addr=' \x01\x00'}]}, &(0x7f0000000180)=0x10) (async) getsockopt$inet_sctp6_SCTP_SOCKOPT_CONNECTX3(r1, 0x84, 0x6f, &(0x7f0000000000)={0x0, 0x1c, &(0x7f0000000080)=[@in6={0xa, 0x0, 0x0, @rand_addr=' \x01\x00'}]}, &(0x7f0000000180)=0x10) r2 = socket$inet_sctp(0x2, 0x1, 0x84) getsockopt$inet_sctp_SCTP_MAX_BURST(r2, 0x84, 0xd, &(0x7f0000000000)=@assoc_value={0x0}, &(0x7f0000000240)=0x8) getsockopt$inet_sctp6_SCTP_PEER_ADDR_PARAMS(r1, 0x84, 0x9, &(0x7f00000000c0)={r3, @in6={{0xa, 0x0, 0x0, @rand_addr=' \x01\x00'}}}, &(0x7f00000001c0)=0x9c) (async) 
getsockopt$inet_sctp6_SCTP_PEER_ADDR_PARAMS(r1, 0x84, 0x9, &(0x7f00000000c0)={r3, @in6={{0xa, 0x0, 0x0, @rand_addr=' \x01\x00'}}}, &(0x7f00000001c0)=0x9c) socket$inet6_sctp(0xa, 0x5, 0x84) (async) r4 = socket$inet6_sctp(0xa, 0x5, 0x84) getsockopt$inet_sctp6_SCTP_SOCKOPT_CONNECTX3(r4, 0x84, 0x6f, &(0x7f0000000000)={0x0, 0x1c, &(0x7f0000000080)=[@in6={0xa, 0x0, 0x0, @rand_addr=' \x01\x00'}]}, &(0x7f0000000180)=0x10) (async) getsockopt$inet_sctp6_SCTP_SOCKOPT_CONNECTX3(r4, 0x84, 0x6f, &(0x7f0000000000)={0x0, 0x1c, &(0x7f0000000080)=[@in6={0xa, 0x0, 0x0, @rand_addr=' \x01\x00'}]}, &(0x7f0000000180)=0x10) socket$inet_sctp(0x2, 0x1, 0x84) (async) r5 = socket$inet_sctp(0x2, 0x1, 0x84) getsockopt$inet_sctp_SCTP_MAX_BURST(r5, 0x84, 0xd, &(0x7f0000000000)=@assoc_value={0x0}, &(0x7f0000000240)=0x8) getsockopt$inet_sctp6_SCTP_PEER_ADDR_PARAMS(r4, 0x84, 0x9, &(0x7f00000000c0)={r6, @in6={{0xa, 0x0, 0x0, @rand_addr=' \x01\x00'}}}, &(0x7f00000001c0)=0x9c) (async) getsockopt$inet_sctp6_SCTP_PEER_ADDR_PARAMS(r4, 0x84, 0x9, &(0x7f00000000c0)={r6, @in6={{0xa, 0x0, 0x0, @rand_addr=' \x01\x00'}}}, &(0x7f00000001c0)=0x9c) sendmmsg$inet_sctp(0xffffffffffffffff, &(0x7f0000002500)=[{&(0x7f0000000080)=@in6={0xa, 0x4e21, 0x8, @ipv4={'\x00', '\xff\xff', @dev={0xac, 0x14, 0x14, 0x22}}, 0x8}, 0x1c, &(0x7f00000000c0)=[{&(0x7f0000000180)="3d65fa7e48adeec0fd3c489625d4590724a2f28c77ada8c7de8af7252e9550e02b72579a5606a174fff5fa29ffedf53c42161d683b2f1db5c9fc263c5dcf4b79c9a07ec58eb4443e6a7b1f6b76c0c400784326a4df70458a2e870c8a5d55618c1e14b31a066bbeb8e31c1842486627b42ef71f16e9ff9cfbc8908d396110397eccb0596f493fe4dc8d4768c3f3bef790df5246ca7f9a0abb29c83b8cae3390f5237f75bb9d6be2f1d57bfc992f03ec6e3c74fa6577f01b26c8648111533ec701aa8f50089f2e2c2a68a5c30d3af6fce829ec57bb1c221e869f86edc900f32ceda6c8651c8cce9cd8639e9cfe2b8de5b177d4a2403eef1f3d3898", 0xfa}, {&(0x7f0000000280)="2536f5a441b8bada3ce0d7692f5baedefac1854476d2b939492d8d32baff8735ec2776bde22d10137eb8471b4ecfd9100946d150d6624b79bb169baab07a22bacb9cade18b2a4b494c82596e771ddd886de49836f54e689303f135b2615a3922a0447e235b69e53de3d66c8a34f22aa27d606311c13171b720cc58ccb7c08a9563a6cf", 0x83}, {&(0x7f0000000400)="1e310bf3e9424d30b348f30e22813e76839837ccf621b49ac36c208abba39fdc5706cb118c10c773d2d9c7f52b2620b86317029ba48519c877b716989f333b3ce22634732385a0415d2880858c18f3cbe3ab43083c44f00942c439bdc00e736adee871877fde441ca000c1268f9dfcc6fa34518d35a5decfbc965c522de258e3196bc3323d91f8340a4c3b4fdc93278274f80e8fa64273ae6563d31eb62bcd966cb1b5163f1551f9cbd0d6b33ec50f0a67f5cf1af42233175a16c731c783c6637e983e256f87b12db03d216e5ea564d189964bc711b5e94b6b54366721fe3074971681c4ee71faf8800e7ba7692115e7", 0xf0}, {&(0x7f0000000500)="aab4f6825921c0f35d8bd6685c87775994cc4dc6e1c0788b9fe9e5d7272c4e4ec1bde6698a8e82936cd7c200040537dc763671411f1184302103feca44bbc74c5540141c51d0283b541340da3bbeced6ba7ec09e5c10dedecbc696525125e1c2303cb313d9a26d61fe523c7262142390209584adb3fd01bbca9d13553eea79f22cb14819d1cbd391b7490f12b76466174362b0a277febf83ca8fa1ddbd5a0987eafaa1130a8848143cf96ce42f22a22439f74c0580e85b5fbb47a28288b28fd03bb6c6031af9befcc459062b2c973691a87e92bf3143e7fc79a99b2607a16eb87c5d07a21e", 0xe5}], 0x4, &(0x7f0000000100)=[@init={0x18, 0x84, 0x0, {0x478e, 0x9, 0x8, 0x8}}, @prinfo={0x18, 0x84, 0x5, {0x20, 0x5}}], 0x30, 0x80}, {&(0x7f0000000340)=@in6={0xa, 0x4e20, 0x8, @local, 0x6}, 0x1c, 
&(0x7f0000000700)=[{&(0x7f0000000600)="9c8c1438807510237179d0a82f35034713fb662ca2cd1068970928122898cc7a42b1c9ae455e67af829f9db8e0740647f44d889e65d5714bf3de9b2e87358129f17484ad858687718331d5ac728ae4c622957c848091b3f5fa4cef8642491f70201d8c766e3defac45b68a7f3c79bcebab93c8bcdd6ac775cf6601bbc87c79192f145822fecadebfe48f7daa6f9be2713208cda055ed37c8db65ab617ee856484422191fcaba0d617124165019fbe2a31bbbe48c6156cd2661e8c3c64e5c617bc89f3a295c017f3f7265a0f50e563bad6bec2450aa3a22d216ba561e65a3ef76fafd84cbe8792d3d9c", 0xe9}, {&(0x7f0000000380)}], 0x2, &(0x7f0000000740)=[@authinfo={0x18, 0x84, 0x6, {0x15a}}, @dstaddrv6={0x20, 0x84, 0x8, @local}, @authinfo={0x18, 0x84, 0x6, {0x9}}, @init={0x18, 0x84, 0x0, {0xff, 0x7, 0x6, 0xffff}}, @init={0x18, 0x84, 0x0, {0x8, 0xde33, 0x0, 0x4}}, @dstaddrv4={0x18, 0x84, 0x7, @dev={0xac, 0x14, 0x14, 0x30}}], 0x98, 0x10}, {&(0x7f0000000800)=@in={0x2, 0x4e20, @local}, 0x10, &(0x7f0000000d40)=[{&(0x7f0000000840)="c34c73687c4e1741a17d136e935daeb3bfd1f39392dc147348b60254468195b82cd7c325f8836930f5513f90753881ab73910bca9358cb8c13e6e7dcfcb8a68644a5217f7c67d889f80de01fb9501b04a1a6bfb95e827585df940620ddc74f4ceb6b8640e62b9c534a00ecb73220f8f8da6350c89832ccae76ac722e9c740e318c782b2677f1d4603b4b6ac9cb39beda49fa4f9b962160c3f57f291ebd8f6a44753bf44e6a6a23d8d159b9cc6e6a02e0cc6c9ead4928629dfa4fc87a7be3a1edecac73f6d20e2b5d086eaa056a6e7b66260491c605200f5b4e9bd40e123c03f36031df70236eb0aad4b1060f1ffcbeef88c229c3c1f4a6bdba", 0xf9}, {&(0x7f0000000940)="7d1e8041a68f950f5575bd38520ead5976ad83083c5142b2e54437b3cdb5b594e3a3801f2d907a23c89523276fafe2a42157e92dc154e9ecbdc5feabe2beff84efdce7044a114aec3f9a9fbfda9c844db16d049964e0d3a9b5fd90cafd7dbb65a540c29f35775331434348971b5b1887b190bc4a98daf2c88a059cbc71d6775af422cc3dae65e28d41551e78413f69509863e9dd988d34f4a3f00b0375936e6893aaf9a269c6a4be36799393ccc31f7e0bf55745aa209d7336c0b368fbeb79", 0xbf}, {&(0x7f0000000a00)="cc7304e3bdb7a92ccb7e59dedc29d0480a02d6b073c253348af3fc42980706e9aeeecbc6fa9f1c41cd7a4873ec7f49350f1aaeb74f441313e4aba8597dee894d3a5a527f464e0b6431f9c1da346834d4d49b0c1ecfe4096f952f6aaedcf5ce3a64d01da54ebfa390bd5e72c82794792a3e3adc6d72d73c4904bf71d2", 0x7c}, {&(0x7f0000000a80)="63fe68478e6b6c0f4d1388693cf1cfdf35242a6c9464f94e0dc2a7a785b0f91be367c7e6d84850f7cc3f314711eb4fc253f48171290be50e58b6914d1daf5e084748e8215b00eb4bf336910db166ec9376cd40cfba2f75e50ce1325e281b66093eea2e82b90db05b7b79031155b227b9afae9b49288003", 0x77}, {&(0x7f0000000b00)="7227d3e3fc465b4142b03060839ed314fd69e8a2aca47f74a7d755ab55ea76db422bcc0c799dde7a967a02d7eab32794c53f670b6340c5c3f7e91a6290c17d05c2f97291a90eb3798456a205b9af37ebb6056225ec598c2d06c7f02c90776e012b498a55af4cd5c1c1488015a2f334cd9d9d15b346644aa18bd4af6b53dfb2ec0cfa47d73ad17d1d9e6bbbdd9646ecf3d97078e2560a3552862b70522c61b60bcd4719fb6eee5f76caba01a4b96e0c4615b554df26a79feba318288a2d942e800de6fda6cef342732111eb4cd47b3180254030c2d1b5f2740a3d0a47beb14a9f19500d3555c97b7a175119a66f1085ccbccee376cdd40dd84f0e5662c2ae", 0xfe}, {&(0x7f0000000c00)="80ccba57687b4ee4139c5ac44688431acd3f5a958d02a5da1da3f9eb893b9086d4e04397ce71bd230cb9e2edab09ee1c81941ca5dc87bb1224aff67adabe2b0958aecf9f0e3a672cbe8dd8990a5410d18619f09d9e65f824a5f4153c7a62a0e7a27fe6ac68260032ea84a8d5bc42ec5a0a25b325f856d8", 0x77}, 
{&(0x7f0000000c80)="4268d77d28cadf351cba2f13396c51cf4e8894ac2c347fe6c6206fe098cb55ba00f1a92b989c32ed676ca34490a50571bf5e9193afcda87c822f43c2fb83e98ae5ad29043cb0c3914aad610b6f094867e72dbd528df2d50e9120f8317c4f8e84c3881d80f0f2f8773e3935b45abb99a977570cc4d2095541dd148beea6e6acb513f47cd22d73417fdc91b13bd5be2757a65a8eed1d8d896b1b7dab5dfdbf0d1e82681dfc1a90377d9e63dcd5ebc6361d8f44dfbfcd9d373acdd60d", 0xbb}], 0x7, &(0x7f0000000dc0)=[@init={0x18, 0x84, 0x0, {0xb160, 0x7fff, 0x8}}], 0x18, 0x20004010}, {&(0x7f0000000e00)=@in6={0xa, 0x4e21, 0x7fff, @ipv4={'\x00', '\xff\xff', @loopback}, 0x8}, 0x1c, &(0x7f0000002300)=[{&(0x7f0000000e40)="934b98851726179e510473ec539a49a6f5906b436731eb995c7ff30a2287b46010069a6f26a61190d4d3e965ee6d5b74af2895e29dfe58dac0f8d14b5895320136cc92bef857fd2cce0505844403ff2cd9e2f6a1d9c577d8aff7039644b8b4bbc7dfa95f8739ac48629d15d0e80b6c1a09751159616adace486f883b852cebd6d80d17911a2350b0cb7401b960e8506f8201c3e7e2c821a683319cdfede6b1853b880ffa048a2f390fc01e7e0eab5af2fd999d225ac1f94f535551e028d07edc408f7fd81dab6c90965188b403a683aa3f4ec5490a2fbb6bcf33", 0xda}, {&(0x7f0000000f40)="952ff30e3f3bcc41091a623e049ea1e94daf03d6a288239477ffffa3399dbac54d4ef100ef9f1534ea12dcfafcea00d9906a83859c3ab406ac43bc7bd9c085100291c380b3bb759c16018aa897e720c9af604d6a65176bf53f97ae2f3acc0d0ae28397ef12c87c841ce50442e6f80335dc3d08be79b9edc91e0b232424de9df600456704570ac0810fe13107c23070e813073d4f740cdede6e00cdb74335e125a20c5dc4350dad8c4ed146718d002d40f7ec4a676cbde53a96a580f38e7085a2eacdfd9886", 0xc5}, {&(0x7f0000001040)="7413ce088fa0a62855cede438e1602d0121c5032403d6e0d926ee2107d8a8f1ad2ff338407b00d79d147d5e80461882d5f88811b84f76e60b8ba7827127b398a890c4d1c0b087b3d1c3c8982fb5cc855ad25970e9b015dae07d01820d3bc195a121562db792463f1e3dc93d0c0bc3d4c6eb7e8e997ae77dd15c0327d12d786faa4a769a1438fff524162e4b9d575f8d4a7ec8ada646d987508407992732b4e0f5f55cea70fe27663115a37a77e72d24bc94871311fb3cf8bccb7d05456fed014e0bd5c27981031373fa63bf8709a72f16ca93eb1fbcd53f79c731744e9e33285f0fed5a1a0a8244e8117bb865917aaace428923f42d858c273f82216fa2ab53c37c27386f812b588a117a7df8e281739224cc2c60b26e45edd1095beeded9e3b7d5a7eee4810257a7aad775721d7ebfaf5a047a30c43363964a720e300967bac7e0f54f61d3e5756b0273c79ebf95e01ac30907eb9eaf661efa1771291f6605385e734582c1a100808faceb2bef516da546169b82c594d346fbf1d224f98dae9b9ff4d39714bb11d61c0ff81425be2b6b60f779c5d59549481357023f5a1b701ef0a2b8f780656e6250c171b1a96c0c60e5ef80745caa8c3b1025b274793a6bc5da5426e1b208d60f64f3d6c8e0b057d579d3935e71d2134c90327eeb4e53688f2f751d5981b960257e9de7c5b55d9711c9f2d0f6daeb307d21906cd4fd69cc791e96b7938460e35007cb79cd0e7f4d4475ae797659608b5ce091bfeea4ea9271b545484f77b0ff8651a5f09cacbcc0c3b4a12c1232936c886d771ad65ce643acc3c12d5a33a842d25ae0f8ca0e5d01251ff03e267a2e04224aebfc2b75fa5d3147b38201f5f124b30cde4062893c7736fd61974969b28a377eb0a4c3c4e0fe9d988e6706839d21e7b45a72284d8c639723a1bd230e0ab6ad8d9e811fc7ce66bc2032c82a5d89670d00bb76c7fbdf764268618c790d5b55eb63e2debd733f1d2db713a722ca9bb8ca044d31be385df1573468fd91c92fda22029cc4f441405aa25c13e7cc4e7706396adbdfc64acbade757cd1b64330919f43d17c51a8aa6539c7eb463df2e411c46109c0843f2121660f021b8b2c08d2daedd1d00e9ba1fcd491a4817fa897cd485b09028b995c4016d05a0650514a72ac6b7c5e49edfc50bd0b10ebb36a4878b7e0425ecbc5107a48635e7b54adfbfcf72648c509b7562108d4c586fcf51c2b8259cc8d4eebcf99c71fd741434603beef5bc58cd1f4c7cf44ceeec46285bc15a251ef432c232849058a212bf3b49e7337c69c28a7b800844549c631381088375c921453d30b5b7e929c5382e66d9b3a8512481d4963e0117d4c5adf44e1a1debd5984f0ece0ee0e4534442967cb79dd2a4bbd6723b4bd6e9814c17f0125ceb05f6320aa0fab26cbbbb99c5d9cd5197bf1
9907d0b29e1cada9e8a00b4abc9916b7ac5bec96e868707398bff38c089ba7c2c7bc300817e6de7f0c527036abba68c8292a3e5e19a01e5371cad9699dcacb974c83f8b671d0e4320e22309e6be74dca954af9ba9fc76e6d5c392059611e4f4ad1e60fada161d70c32ced7b39bbb31fd73dde596f1ab2a6962bc5deb92fed1f98c74ca779c95f6b41c745b39be56fb230abbcf579dc0d30b8ee0afe78bbb849c7ac29ca8fb876215419cf6ce2d38cc99e6d528c98d8912d6454ca999dbf1ca5c0a52d70b33b02e930dd473a24580e66a53f74f46dd2a53805366cf75e0094555a963d02d3af0d8e7cd1ab592a19e985970f3dfecfc7bf7e0d32f94b0e1d13888394b32a78b92928dbfe61f02756c8a4cfd507350709681d33000679c2071f7cdf5d946c4cad93b4be5d2322035c32747079bd8148df6fb51c59b2c8dc9a95d89f1f67a82731f4a4314b0469e500d2d6244097f5c622cae59afe0d973cffd87919b129ba16404003559a376123d72e3b153916b4fcec2c8e70f4d5904bb6d961ece44eb8037ed7fbb0de43b068360472d038f0063d3dcabc70e22c4ac84310267a06905bc53ee7dde1c7a931a30037365ded88d61b976d0c574bcf1a4ca2a2ff2e2acdedf20ae91e79bcc499eb50774232898a0c2dbcc784e6a4f18baa5036f4130e0c8430dac6e216e17c85431f6ac9644ec90f45583f56209f90d693114d65ef5989b98ec81c140a70468a439192dcb9d8d1e3d0209242ee142cc8c9eaff29c951b9ad6318113cd2e1045493efe2f5052e6c627b3d81f6f900f39fe9912ee73056fbe8dd2fb8f1013459c515b7908c0aa9c0625a93edb04e0724c1cb7ccffc66a15bb1e7810d696fabb2a950ce6b533ec87a8840ad47898f54c2f7eade535aa6e849a8235ac42d4397f8404bf2436c0480ca205215641f186e74ed8d04ef793b0fb457133b80fbaea3ec13094d1fb98b19923a483c25bda173cd3f438804e444bed852ec3908203944c708c60fb27a3d0c742cf8b4b18f4a3f9b8dc9d87000198f30f3d57d18095c3c589d710a1277afd00376504e1b8fc021eff540fb6922658e1958b7cb79937fe00c73ba1fdaebd331e622822ff57078d509d44960375371d749951ee319c320354e4120d841a4693e461770084016733fd8525538354d9b4231300188bc0ed8a46a1db9d316b5e67e45bba09c9dc623fa7f67e229402410bdc4391b73ccc9aeb0075c1884219fc17dd662bb4f9e4069761584a0703b3ab0c334c725d46205d6b276c6442d52d3c454798277930fe086f650c4dad1cae28b6c28f16246b9a56a1cc859ee6feb13c9c3058a89ee212be9ac3b2902e1c6297aa4d3b87a884f7b57d8e4f11671c31a2d6b63d54f18b779151ee512cfaf61af6e7bc477abd0a0625d9dfff3b3ce4b403909856e31a03998fb887720d081209c7e302bc8b5382c0d47695393058831b70ffe6424e014c7d0df3efa49166d1f0906911fd83d7b3e7f3d3d6ef534b0a279ecfbe36741e668007821b0f8a157cc64b265c286f246ea6707efc51b71cff1999c6441768840eedc9e698afd3ef738f61f7027c473bca098367a84c2f97505f9fcf2a1a469bdc6aadddb78d14717bdeae17b2b497adc8c71216ddab964937540591d25aff4b7db6b16ca5250677304832ac04d46a230840124e0cb768c6b6dc58c4bce83854ff707ac8eee83d6be5c51b52584ed1b874780c7ac559be76e34f4f1e53d1465c201dec01ad0cfdf69cc6a02670397201345fb111d5f2105030b662038ac268d1fefc8d0c27b8cf8a347765ffd50a8a9c29bd83ce79b807de04abba6107a6404daf5cd8114a8a72b0a6c8675e851506df84501159647b048d8e75c157ace189ceefa88bec8cb28dda4c0bbf91dffb8e459efb43cc0bf21f2b3df6c34b5dc22c50dc6ba88f7e50616dae26727b5cd094201e45ebc9add451d19e30c37f5a2b9e4793d297d405ece2330d887c13687c0877380f6e02c2c99a988cc172937929b51caf2ffb109c03fe1a9e110829a5d3738a0b6a40f70e80c338e38faf1707bedd34ff6b80b9448ad9fcc9f7e835074ea074219a5b7b3eaf717f84e2ddf9b88403cb9d7b106ed21061c22a8e5c836bc62e8c7d7e89146e114fd224d90ce7bf98b3a3003ccf161e22a02cfe596d2592a0e6d46b549d5992c69ece3b711495577ea86caee85846800ae623419b55089c8f4fc54464da66615cf18ee2489e1e9cda06915715a1ffab9076fba7845c7e9f279e94d515816aecc7ef718f4f9b58e8c31592df2eb3cb3f18fb323504dac608e816ae11b1e23ec8aea9e5881f2658beabd1e19d2c39db6e3f3a23c22e9ad47ddd03522e87a56dc35837ec5aab583b4adc9a6ef73f9189114cdf165fd7e7cebce94e24c21fadf761531ffe3338d74d400e588178d550d9a7e27036c84487b2944ffaa83d83d2290574212a5b68c7124f4eea7cdfcf7813085221ec5e33f98fad5031cb92af92ed1a6959981630202fd
d6a11ab422eeb2e3a74887f1b568510bc29bbbce4128bdbcd86e95609571d2a0ecbf5e645e61d8b42d031839356c36ce428361de5bb631e9f3f81ca7291baa8d9e2eb70e3306687735e538e54826f0f570edb3bf1082676f2bbe0a6b10a87e74008f89d906222a822bb5124e9f44f7163725f3f4f62fd2d8347f700ddef66e16c4935c50be9c22dc83a5e4fadf07dab6dc48a5531af859d5773cdc898838647b506acf3fbb9743120503383f81a3c3736d922396a3a0c9d7c31781c0d23224590e93a2a8f6bae2c48c3c1cd25a6b3331dd42e76c6639ffeadf8b30e678bc3e2dd33968e7ad457d715e8a72eb2d87c2f8e57c5238b4dd13adda2ebadadd0c773b8636ce1fa8dbf2ef01983f8aa1f59c50251f51c168225f9affdad6c0fca6d8ccf9a5dbb2765cbcf65a782b24ef18c2579ae3f1681a1615c3bb450b56570507cf0e7f3d498f52a352e0001ab58cf14266e086f68ee6c45d598c2317767616a2ea70ab2db78c01e459992e8df001f30198727dc64f12ebcb56ec1898d95fd8aa6ef0cdf0e28a8a97d376a6a399c33f80370bc0e7a2ae5502389bac840984c0e1a857455e16bd2ae312dcd6e744cf502256f549a81087d2460f90fc2c2d3947324fb4c6b01671e054cb67863ad21e7e92a2ea6984bdc72aa77daf2a84aec4bfe71deefb5cba2ff3f0d3ee71b753d025f41bfe186c41520421a304cd501da0a13c70b29c6bd621dd7e386bdb94b85959f835c06872f00a88521820f18b730816c9ad35bd26df1675cccdb203475b0977c01dcca429b3ef98ed14486d1de1b1504db73632b1fe5f9b569ae5ca0afe95ffffecc22b286842ae3459f889918dc79fef29080b4b197d99b7f4a21424dcdec71275863c797ee039beccb82e9f1371d493ee47c0d224b0fbaff85a0451de359696ecf2629c4ca8bc649bcb834ddcb56dcd1e65c2cf1ab916cc482dc491dc3085c662861b046ea9249a0ec0e74207fd4dc422d67f12ae32a25471c4a34abb62205d6975a8cbb63644b8993ae78419d87c95674636ca3359ea073b13842563a6af66c356945e77a289c96810bafb4f9c34915476bd4817e8a52626b63d0cd1b16424aa12d6230f76fba5414ffdac66ea7da2f6c9731179bafd4cee443dd3d5de3b58a6f02d298d1916509c58f2f9b0074795a347258bd7095d1a3b9ebb584b6984593333986b66db733920c9a0a4e80b0d3b8613903ad144359d45e22804337353e0143869dca33bcd34a7cf78e52173b7940c1b61eeefcbad300944e3538e3c343c2ec24938ec3ad6bf5b3d656ab8d34d65a55c2fac2baf5c1f54abe648dacdf7c4bd955c41a5df0660587b454b07e2e360371db3d280708d7e233d4ed743fafee63690acce3589fdf9408a6d7c9a840c9f5e37669b4640de282f3fee71474958a391282dd968985c2292d7a79dba248847ce18b4bb1ba2959c964ef80fca9fbe9b86ae2eddcfea2ea4b722d25a3618f3b3d960c2fc94efdbc71cc77adfcae58d208d604e20d8a4526aec18d8dca895c54947a25cd7598109ea4f1e1046c9e5fb5393bba376eddb3b001f08eec8f8eca6051bcf9f6e9d4459aba50c5e58fe31f3430bfe86904569965dbe476dab0c5a4d0125c15f38f319089af6bb06c3ee9943d550b04525de88a8b1787652c504ee79fb726174478887817195254853f0a8d32a37d08487eec9e3a607de4d64006abd10dcb8e065bf4f96fb027c901219df0558f6738722b2ba2f5a2651dc6259288edd3e0772d17f2139e65344dc3bb2312ae981b0d3a333f2a569f3b27dcb26b516771963e748827f649450676973de3feaeaddd91d2354304ca", 0x1000}, {&(0x7f0000002040)="0fffd1853e17205b0cd00724e90796a7d9dcc66665d29823c8932e9986501f45b94a8478ae466c42a817ac2ab8d8e703e3c73580c88afdbf0c489e35a71fb0a195fe30e775fda4103cb5f930161c65c05c08a6d014017e1e29ee598f99aeba2a8b5437be4ea0c7f10004b42b684a1fc1b437937b2f3f142faec42097994e3e9a4c6cb362ef72c864c2c3f3ba2e45023de9e3e9937ad2f0acceb22fbb0b23b35da205efde4593dd81b65cc246729883fd280e7645ac159368593a18c844eca7b51c195d1731174686266fe5fea289f19a55a9e8d54bdaf773", 0xd8}, {&(0x7f0000002140)="5d08523c8af851f0f96d045a16cff5290020a05da0b48992e89c41c65a59d2798efbd0f95a27166c19021f891ab9d93e1ae7fe1ac54d4eafbfe9a722ed3e6570e7b9f1ad6e71811c8f5c494901d72ca0111c7a4e06592de00fb2c1c94ad9319d6f54e65261de8610bafcaa164970c772f7ff0b0f319d3ff91aa0dfdf92ec3b58e37c894a8a64bd97e10eab0eaee877ec1500662567263aac0ac725a5a9fe8a49dada29d8ac8c3d93e80fa964e2a447d5be3c8a304306eacc689b2b2dffd2a076a3bb1ed4ef", 0xc5}, 
{&(0x7f0000002240)="49e437cbe796a2f7c9765be899ec8624240e047c46850df62822560f5daeedb24758092b87f78acaa277af9c13f405ad9b6c3c1c98e30ef971152aef1eeba390878d71f13ceb8e69dc775ba6c1ca03de229deb0da394bc9363619d8534e1bb3464f567cfccb64ecbe83e239afa7cc406c2a96a57fb45e57b33bd4c1be0eadfd79c92c91f4f0531780f4dee3f712a9494cb27775684351757f7b058a12bb9c47f478048003dd30127e7b45a5ce99e", 0xae}], 0x6, &(0x7f0000002380)=[@sndrcv={0x30, 0x84, 0x1, {0x80, 0x5, 0x820f, 0xffffffb4, 0x9, 0xc66c, 0x8, 0x6, r3}}, @sndrcv={0x30, 0x84, 0x1, {0x9, 0x4000, 0x207, 0x9, 0x0, 0x81, 0xa4e, 0x1}}, @authinfo={0x18, 0x84, 0x6, {0x101}}, @dstaddrv6={0x20, 0x84, 0x8, @private1}, @sndrcv={0x30, 0x84, 0x1, {0x4, 0xb87f, 0x8000, 0x81, 0x20, 0x400, 0x80000001, 0xe53b, r6}}, @authinfo={0x18, 0x84, 0x6, {0x6}}, @sndinfo={0x20, 0x84, 0x2, {0x2, 0x0, 0x101, 0x7fffffff}}, @dstaddrv6={0x20, 0x84, 0x8, @mcast1}, @sndrcv={0x30, 0x84, 0x1, {0x0, 0x5, 0x1, 0x85, 0x81, 0x80000001, 0x9, 0xbb}}, @authinfo={0x18, 0x84, 0x6, {0x8}}], 0x168, 0x20000000}], 0x4, 0x2400c004) r7 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r7, 0x0, 0x8000000000004) openat$cgroup_ro(r7, &(0x7f0000000000)='pids.current\x00', 0x0, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) (async) accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r8 = socket$netlink(0x10, 0x3, 0x0) ioctl$sock_kcm_SIOCKCMCLONE(r7, 0x89e2, &(0x7f0000000380)={r0}) (async) ioctl$sock_kcm_SIOCKCMCLONE(r7, 0x89e2, &(0x7f0000000380)={r0}) sendmsg$NFT_MSG_GETOBJ_RESET(r9, &(0x7f00000027c0)={&(0x7f0000002600)={0x10, 0x0, 0x0, 0x20000}, 0xc, &(0x7f0000002780)={&(0x7f0000002640)={0x60, 0x15, 0xa, 0x101, 0x0, 0x0, {0x2, 0x0, 0x1}, [@NFTA_OBJ_USERDATA={0x1a, 0x8, "ff9a6d06aeca0ecc6d390465fc7d5874e1fe35d40c31"}, @NFTA_OBJ_TABLE={0x9, 0x1, 'syz1\x00'}, @NFTA_OBJ_TABLE={0x9, 0x1, 'syz1\x00'}, @NFTA_OBJ_TABLE={0x9, 0x1, 'syz1\x00'}, @NFTA_OBJ_HANDLE={0xc, 0x6, 0x1, 0x0, 0x2}]}, 0x60}, 0x1, 0x0, 0x0, 0x1}, 0x40) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) r10 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r10, 0x0, 0x8000000000004) syz_genetlink_get_family_id$batadv(&(0x7f00000026c0), r10) sendmsg$nl_route(r8, 0x0, 0x0) 10:41:47 executing program 5: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) writev(r0, &(0x7f0000000340)=[{&(0x7f0000000280)="043caa07f3a3f76d858ace87d769ded7550278376e8c4f6550d9108cbf5189deea33e10b8a7224c6107d53d01f904f58c8e83e60c4192c5a7fbd1a5f9c9fb63ca8dddee8d195355273926702d073752d80d8545e667b5c485a", 0x59}, {&(0x7f0000000100)="e48c29435b087869336f3e74363df6ea882508c8aa487ff564e800ccd724f50e3896c5d551b1a2ec6bb29187299fe2e06c11dfcb2075f9a839b34a4a6b4c2bec", 0x40}, 
{&(0x7f0000000400)="a318ece45becd6e47ff8375b816640bce6a9ff4d8e83ab791404d351c3ba957f2f3a0220176c17e899248cf03c8a908c4ade8de15d81e79b803df97f6fe5e817c2efde3befe034a7bc6d52a0c1caf88dc7d2077e40879876ce4aa8d6273615939a97526b8cb409d0c58f8075db16b3aed80bd44efb660be17d9db07a801a43b59a034e9998a5d60b072ab0fe5dd97a86e25568ab6d9d74c7af31171f38eaff0cb0fc24300b48212ec6608ef820fc964a28db4cd8d0526d550efac04bc60f4e4cd3c224cbe0ba45f28e0566930afbca59424e39854793c6e301fdc9cedeb00500db4af5f6c2004568aa4682fc656b1943bb63", 0xf2}, {&(0x7f0000000500)="a1a8c7128cf53b860b5aaa1ccee4ffe846420e0192cfed6fed4eeddf537179b0a83627c3868f3b78b60a61263a90957f69337b2ab0a123218ed0318185135c095b602df299adfc458eef4947ab34782ecdbae8436486efca9d9076f2316af18200a3014efba92706cc1748f7c5925bdaa3aa4516b6f1a6f22f4f1c23d2554620b90ad856298d3087e4519d78d14dd2d1dbb2401cfd330b19ac2fc26b2b4837023a585b846cdc0e007773719a6ad0cf3acb3cde81220e1909741522d75d5c49222f1d0ef6d8bd1b58070df04e658553cb734adbdb98c8482330ed25a1d53b5b3a6c09aa436715f3535c84a943", 0xec}], 0x4) accept(0xffffffffffffffff, 0x0, &(0x7f0000000080)) socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x0, 0x803, 0x0) syz_genetlink_get_family_id$mptcp(&(0x7f0000000000), r1) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r3, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r5, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0x9a010000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x3c}}, 0x0) [ 2171.267317][ T7200] bond842: (slave bridge999): Enslaving as an active interface with an up link 10:41:47 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000040)='memory.numa_stat\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2171.369424][ T7212] netlink: 'syz-executor.3': attribute type 1 has an invalid length. 
[ 2171.491916][ T7212] 8021q: adding VLAN 0 to HW filter on device bond1433 10:41:47 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(0xffffffffffffffff, &(0x7f0000000080)=ANY=[@ANYBLOB="2321202e2f66696c6530202720200adb941c5a01c5b98440db3f5d2ff89a0ae2b2e8809e2556012f990209ae08a74d71fe42ccbe13ed4f35a8b37b722a569137ea59d8fc21a81923b278772c3961256c98123ef9b55b48e4c5909bc583ca86c986387eb5489837f066bb83d6034e2a91f1d6ec4b5836c9b5fc317150b7a67d76aff9a6ace29e8ade056a79987852efc5b0dfc83bed611d3fbfdbcc0338287056735b013bf0d2be3d48083e46c084c224a01a14167216c03f71625e65319a97939be26e034bffcc8a8f"], 0xd1) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f00000001c0)) (async) r1 = socket$inet(0x2, 0x4, 0x6) getsockopt$ARPT_SO_GET_INFO(r1, 0x0, 0x60, &(0x7f00000002c0)={'filter\x00', 0x0, [0x9, 0xe4d9]}, &(0x7f0000000340)=0x44) r2 = accept$phonet_pipe(r0, &(0x7f0000000000), &(0x7f0000000180)=0x10) mmap(&(0x7f0000ffd000/0x1000)=nil, 0x1000, 0x2, 0x4000010, r2, 0x2d8ba000) pipe(&(0x7f0000000140)={0xffffffffffffffff}) close(r3) (async) accept4$phonet_pipe(r3, 0x0, &(0x7f0000000380), 0x800) (async) r4 = socket$inet_sctp(0x2, 0x1, 0x84) getsockopt$inet_sctp_SCTP_MAX_BURST(r4, 0x84, 0xd, &(0x7f0000000000)=@assoc_value, &(0x7f00000000c0)=0x8) ioctl$FS_IOC_FSGETXATTR(r4, 0x801c581f, &(0x7f0000000240)={0x2, 0x3, 0x7, 0xffff7d8f, 0xead}) setsockopt$MRT6_ASSERT(r0, 0x29, 0xcf, &(0x7f0000000280)=0x1, 0x4) 10:41:47 executing program 2: r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r1 = openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r1, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) ioctl$FS_IOC_RESVSP(r1, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r1, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) write$tun(r1, 0x0, 0x0) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000000)='pids.current\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r2, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) pipe(&(0x7f0000000140)={0xffffffffffffffff}) close(r3) openat$cgroup_ro(r3, &(0x7f0000000080)='memory.current\x00', 0x0, 0x0) 10:41:47 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) (async) r0 = openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) (async) r1 = socket$inet6_sctp(0xa, 0x5, 0x84) getsockopt$inet_sctp6_SCTP_SOCKOPT_CONNECTX3(r1, 0x84, 0x6f, &(0x7f0000000000)={0x0, 0x1c, &(0x7f0000000080)=[@in6={0xa, 0x0, 0x0, @rand_addr=' \x01\x00'}]}, &(0x7f0000000180)=0x10) (async) r2 = socket$inet_sctp(0x2, 0x1, 0x84) getsockopt$inet_sctp_SCTP_MAX_BURST(r2, 0x84, 0xd, &(0x7f0000000000)=@assoc_value={0x0}, &(0x7f0000000240)=0x8) getsockopt$inet_sctp6_SCTP_PEER_ADDR_PARAMS(r1, 0x84, 0x9, &(0x7f00000000c0)={r3, @in6={{0xa, 0x0, 0x0, @rand_addr=' \x01\x00'}}}, &(0x7f00000001c0)=0x9c) r4 = socket$inet6_sctp(0xa, 
0x5, 0x84) getsockopt$inet_sctp6_SCTP_SOCKOPT_CONNECTX3(r4, 0x84, 0x6f, &(0x7f0000000000)={0x0, 0x1c, &(0x7f0000000080)=[@in6={0xa, 0x0, 0x0, @rand_addr=' \x01\x00'}]}, &(0x7f0000000180)=0x10) (async) r5 = socket$inet_sctp(0x2, 0x1, 0x84) getsockopt$inet_sctp_SCTP_MAX_BURST(r5, 0x84, 0xd, &(0x7f0000000000)=@assoc_value={0x0}, &(0x7f0000000240)=0x8) getsockopt$inet_sctp6_SCTP_PEER_ADDR_PARAMS(r4, 0x84, 0x9, &(0x7f00000000c0)={r6, @in6={{0xa, 0x0, 0x0, @rand_addr=' \x01\x00'}}}, &(0x7f00000001c0)=0x9c) sendmmsg$inet_sctp(0xffffffffffffffff, &(0x7f0000002500)=[{&(0x7f0000000080)=@in6={0xa, 0x4e21, 0x8, @ipv4={'\x00', '\xff\xff', @dev={0xac, 0x14, 0x14, 0x22}}, 0x8}, 0x1c, &(0x7f00000000c0)=[{&(0x7f0000000180)="3d65fa7e48adeec0fd3c489625d4590724a2f28c77ada8c7de8af7252e9550e02b72579a5606a174fff5fa29ffedf53c42161d683b2f1db5c9fc263c5dcf4b79c9a07ec58eb4443e6a7b1f6b76c0c400784326a4df70458a2e870c8a5d55618c1e14b31a066bbeb8e31c1842486627b42ef71f16e9ff9cfbc8908d396110397eccb0596f493fe4dc8d4768c3f3bef790df5246ca7f9a0abb29c83b8cae3390f5237f75bb9d6be2f1d57bfc992f03ec6e3c74fa6577f01b26c8648111533ec701aa8f50089f2e2c2a68a5c30d3af6fce829ec57bb1c221e869f86edc900f32ceda6c8651c8cce9cd8639e9cfe2b8de5b177d4a2403eef1f3d3898", 0xfa}, {&(0x7f0000000280)="2536f5a441b8bada3ce0d7692f5baedefac1854476d2b939492d8d32baff8735ec2776bde22d10137eb8471b4ecfd9100946d150d6624b79bb169baab07a22bacb9cade18b2a4b494c82596e771ddd886de49836f54e689303f135b2615a3922a0447e235b69e53de3d66c8a34f22aa27d606311c13171b720cc58ccb7c08a9563a6cf", 0x83}, {&(0x7f0000000400)="1e310bf3e9424d30b348f30e22813e76839837ccf621b49ac36c208abba39fdc5706cb118c10c773d2d9c7f52b2620b86317029ba48519c877b716989f333b3ce22634732385a0415d2880858c18f3cbe3ab43083c44f00942c439bdc00e736adee871877fde441ca000c1268f9dfcc6fa34518d35a5decfbc965c522de258e3196bc3323d91f8340a4c3b4fdc93278274f80e8fa64273ae6563d31eb62bcd966cb1b5163f1551f9cbd0d6b33ec50f0a67f5cf1af42233175a16c731c783c6637e983e256f87b12db03d216e5ea564d189964bc711b5e94b6b54366721fe3074971681c4ee71faf8800e7ba7692115e7", 0xf0}, {&(0x7f0000000500)="aab4f6825921c0f35d8bd6685c87775994cc4dc6e1c0788b9fe9e5d7272c4e4ec1bde6698a8e82936cd7c200040537dc763671411f1184302103feca44bbc74c5540141c51d0283b541340da3bbeced6ba7ec09e5c10dedecbc696525125e1c2303cb313d9a26d61fe523c7262142390209584adb3fd01bbca9d13553eea79f22cb14819d1cbd391b7490f12b76466174362b0a277febf83ca8fa1ddbd5a0987eafaa1130a8848143cf96ce42f22a22439f74c0580e85b5fbb47a28288b28fd03bb6c6031af9befcc459062b2c973691a87e92bf3143e7fc79a99b2607a16eb87c5d07a21e", 0xe5}], 0x4, &(0x7f0000000100)=[@init={0x18, 0x84, 0x0, {0x478e, 0x9, 0x8, 0x8}}, @prinfo={0x18, 0x84, 0x5, {0x20, 0x5}}], 0x30, 0x80}, {&(0x7f0000000340)=@in6={0xa, 0x4e20, 0x8, @local, 0x6}, 0x1c, &(0x7f0000000700)=[{&(0x7f0000000600)="9c8c1438807510237179d0a82f35034713fb662ca2cd1068970928122898cc7a42b1c9ae455e67af829f9db8e0740647f44d889e65d5714bf3de9b2e87358129f17484ad858687718331d5ac728ae4c622957c848091b3f5fa4cef8642491f70201d8c766e3defac45b68a7f3c79bcebab93c8bcdd6ac775cf6601bbc87c79192f145822fecadebfe48f7daa6f9be2713208cda055ed37c8db65ab617ee856484422191fcaba0d617124165019fbe2a31bbbe48c6156cd2661e8c3c64e5c617bc89f3a295c017f3f7265a0f50e563bad6bec2450aa3a22d216ba561e65a3ef76fafd84cbe8792d3d9c", 0xe9}, {&(0x7f0000000380)}], 0x2, &(0x7f0000000740)=[@authinfo={0x18, 0x84, 0x6, {0x15a}}, @dstaddrv6={0x20, 0x84, 0x8, @local}, @authinfo={0x18, 0x84, 0x6, {0x9}}, @init={0x18, 0x84, 0x0, {0xff, 0x7, 0x6, 0xffff}}, @init={0x18, 0x84, 0x0, {0x8, 0xde33, 0x0, 0x4}}, @dstaddrv4={0x18, 0x84, 0x7, @dev={0xac, 0x14, 0x14, 0x30}}], 0x98, 
0x10}, {&(0x7f0000000800)=@in={0x2, 0x4e20, @local}, 0x10, &(0x7f0000000d40)=[{&(0x7f0000000840)="c34c73687c4e1741a17d136e935daeb3bfd1f39392dc147348b60254468195b82cd7c325f8836930f5513f90753881ab73910bca9358cb8c13e6e7dcfcb8a68644a5217f7c67d889f80de01fb9501b04a1a6bfb95e827585df940620ddc74f4ceb6b8640e62b9c534a00ecb73220f8f8da6350c89832ccae76ac722e9c740e318c782b2677f1d4603b4b6ac9cb39beda49fa4f9b962160c3f57f291ebd8f6a44753bf44e6a6a23d8d159b9cc6e6a02e0cc6c9ead4928629dfa4fc87a7be3a1edecac73f6d20e2b5d086eaa056a6e7b66260491c605200f5b4e9bd40e123c03f36031df70236eb0aad4b1060f1ffcbeef88c229c3c1f4a6bdba", 0xf9}, {&(0x7f0000000940)="7d1e8041a68f950f5575bd38520ead5976ad83083c5142b2e54437b3cdb5b594e3a3801f2d907a23c89523276fafe2a42157e92dc154e9ecbdc5feabe2beff84efdce7044a114aec3f9a9fbfda9c844db16d049964e0d3a9b5fd90cafd7dbb65a540c29f35775331434348971b5b1887b190bc4a98daf2c88a059cbc71d6775af422cc3dae65e28d41551e78413f69509863e9dd988d34f4a3f00b0375936e6893aaf9a269c6a4be36799393ccc31f7e0bf55745aa209d7336c0b368fbeb79", 0xbf}, {&(0x7f0000000a00)="cc7304e3bdb7a92ccb7e59dedc29d0480a02d6b073c253348af3fc42980706e9aeeecbc6fa9f1c41cd7a4873ec7f49350f1aaeb74f441313e4aba8597dee894d3a5a527f464e0b6431f9c1da346834d4d49b0c1ecfe4096f952f6aaedcf5ce3a64d01da54ebfa390bd5e72c82794792a3e3adc6d72d73c4904bf71d2", 0x7c}, {&(0x7f0000000a80)="63fe68478e6b6c0f4d1388693cf1cfdf35242a6c9464f94e0dc2a7a785b0f91be367c7e6d84850f7cc3f314711eb4fc253f48171290be50e58b6914d1daf5e084748e8215b00eb4bf336910db166ec9376cd40cfba2f75e50ce1325e281b66093eea2e82b90db05b7b79031155b227b9afae9b49288003", 0x77}, {&(0x7f0000000b00)="7227d3e3fc465b4142b03060839ed314fd69e8a2aca47f74a7d755ab55ea76db422bcc0c799dde7a967a02d7eab32794c53f670b6340c5c3f7e91a6290c17d05c2f97291a90eb3798456a205b9af37ebb6056225ec598c2d06c7f02c90776e012b498a55af4cd5c1c1488015a2f334cd9d9d15b346644aa18bd4af6b53dfb2ec0cfa47d73ad17d1d9e6bbbdd9646ecf3d97078e2560a3552862b70522c61b60bcd4719fb6eee5f76caba01a4b96e0c4615b554df26a79feba318288a2d942e800de6fda6cef342732111eb4cd47b3180254030c2d1b5f2740a3d0a47beb14a9f19500d3555c97b7a175119a66f1085ccbccee376cdd40dd84f0e5662c2ae", 0xfe}, {&(0x7f0000000c00)="80ccba57687b4ee4139c5ac44688431acd3f5a958d02a5da1da3f9eb893b9086d4e04397ce71bd230cb9e2edab09ee1c81941ca5dc87bb1224aff67adabe2b0958aecf9f0e3a672cbe8dd8990a5410d18619f09d9e65f824a5f4153c7a62a0e7a27fe6ac68260032ea84a8d5bc42ec5a0a25b325f856d8", 0x77}, {&(0x7f0000000c80)="4268d77d28cadf351cba2f13396c51cf4e8894ac2c347fe6c6206fe098cb55ba00f1a92b989c32ed676ca34490a50571bf5e9193afcda87c822f43c2fb83e98ae5ad29043cb0c3914aad610b6f094867e72dbd528df2d50e9120f8317c4f8e84c3881d80f0f2f8773e3935b45abb99a977570cc4d2095541dd148beea6e6acb513f47cd22d73417fdc91b13bd5be2757a65a8eed1d8d896b1b7dab5dfdbf0d1e82681dfc1a90377d9e63dcd5ebc6361d8f44dfbfcd9d373acdd60d", 0xbb}], 0x7, &(0x7f0000000dc0)=[@init={0x18, 0x84, 0x0, {0xb160, 0x7fff, 0x8}}], 0x18, 0x20004010}, {&(0x7f0000000e00)=@in6={0xa, 0x4e21, 0x7fff, @ipv4={'\x00', '\xff\xff', @loopback}, 0x8}, 0x1c, &(0x7f0000002300)=[{&(0x7f0000000e40)="934b98851726179e510473ec539a49a6f5906b436731eb995c7ff30a2287b46010069a6f26a61190d4d3e965ee6d5b74af2895e29dfe58dac0f8d14b5895320136cc92bef857fd2cce0505844403ff2cd9e2f6a1d9c577d8aff7039644b8b4bbc7dfa95f8739ac48629d15d0e80b6c1a09751159616adace486f883b852cebd6d80d17911a2350b0cb7401b960e8506f8201c3e7e2c821a683319cdfede6b1853b880ffa048a2f390fc01e7e0eab5af2fd999d225ac1f94f535551e028d07edc408f7fd81dab6c90965188b403a683aa3f4ec5490a2fbb6bcf33", 0xda}, 
{&(0x7f0000000f40)="952ff30e3f3bcc41091a623e049ea1e94daf03d6a288239477ffffa3399dbac54d4ef100ef9f1534ea12dcfafcea00d9906a83859c3ab406ac43bc7bd9c085100291c380b3bb759c16018aa897e720c9af604d6a65176bf53f97ae2f3acc0d0ae28397ef12c87c841ce50442e6f80335dc3d08be79b9edc91e0b232424de9df600456704570ac0810fe13107c23070e813073d4f740cdede6e00cdb74335e125a20c5dc4350dad8c4ed146718d002d40f7ec4a676cbde53a96a580f38e7085a2eacdfd9886", 0xc5}, {&(0x7f0000001040)="7413ce088fa0a62855cede438e1602d0121c5032403d6e0d926ee2107d8a8f1ad2ff338407b00d79d147d5e80461882d5f88811b84f76e60b8ba7827127b398a890c4d1c0b087b3d1c3c8982fb5cc855ad25970e9b015dae07d01820d3bc195a121562db792463f1e3dc93d0c0bc3d4c6eb7e8e997ae77dd15c0327d12d786faa4a769a1438fff524162e4b9d575f8d4a7ec8ada646d987508407992732b4e0f5f55cea70fe27663115a37a77e72d24bc94871311fb3cf8bccb7d05456fed014e0bd5c27981031373fa63bf8709a72f16ca93eb1fbcd53f79c731744e9e33285f0fed5a1a0a8244e8117bb865917aaace428923f42d858c273f82216fa2ab53c37c27386f812b588a117a7df8e281739224cc2c60b26e45edd1095beeded9e3b7d5a7eee4810257a7aad775721d7ebfaf5a047a30c43363964a720e300967bac7e0f54f61d3e5756b0273c79ebf95e01ac30907eb9eaf661efa1771291f6605385e734582c1a100808faceb2bef516da546169b82c594d346fbf1d224f98dae9b9ff4d39714bb11d61c0ff81425be2b6b60f779c5d59549481357023f5a1b701ef0a2b8f780656e6250c171b1a96c0c60e5ef80745caa8c3b1025b274793a6bc5da5426e1b208d60f64f3d6c8e0b057d579d3935e71d2134c90327eeb4e53688f2f751d5981b960257e9de7c5b55d9711c9f2d0f6daeb307d21906cd4fd69cc791e96b7938460e35007cb79cd0e7f4d4475ae797659608b5ce091bfeea4ea9271b545484f77b0ff8651a5f09cacbcc0c3b4a12c1232936c886d771ad65ce643acc3c12d5a33a842d25ae0f8ca0e5d01251ff03e267a2e04224aebfc2b75fa5d3147b38201f5f124b30cde4062893c7736fd61974969b28a377eb0a4c3c4e0fe9d988e6706839d21e7b45a72284d8c639723a1bd230e0ab6ad8d9e811fc7ce66bc2032c82a5d89670d00bb76c7fbdf764268618c790d5b55eb63e2debd733f1d2db713a722ca9bb8ca044d31be385df1573468fd91c92fda22029cc4f441405aa25c13e7cc4e7706396adbdfc64acbade757cd1b64330919f43d17c51a8aa6539c7eb463df2e411c46109c0843f2121660f021b8b2c08d2daedd1d00e9ba1fcd491a4817fa897cd485b09028b995c4016d05a0650514a72ac6b7c5e49edfc50bd0b10ebb36a4878b7e0425ecbc5107a48635e7b54adfbfcf72648c509b7562108d4c586fcf51c2b8259cc8d4eebcf99c71fd741434603beef5bc58cd1f4c7cf44ceeec46285bc15a251ef432c232849058a212bf3b49e7337c69c28a7b800844549c631381088375c921453d30b5b7e929c5382e66d9b3a8512481d4963e0117d4c5adf44e1a1debd5984f0ece0ee0e4534442967cb79dd2a4bbd6723b4bd6e9814c17f0125ceb05f6320aa0fab26cbbbb99c5d9cd5197bf19907d0b29e1cada9e8a00b4abc9916b7ac5bec96e868707398bff38c089ba7c2c7bc300817e6de7f0c527036abba68c8292a3e5e19a01e5371cad9699dcacb974c83f8b671d0e4320e22309e6be74dca954af9ba9fc76e6d5c392059611e4f4ad1e60fada161d70c32ced7b39bbb31fd73dde596f1ab2a6962bc5deb92fed1f98c74ca779c95f6b41c745b39be56fb230abbcf579dc0d30b8ee0afe78bbb849c7ac29ca8fb876215419cf6ce2d38cc99e6d528c98d8912d6454ca999dbf1ca5c0a52d70b33b02e930dd473a24580e66a53f74f46dd2a53805366cf75e0094555a963d02d3af0d8e7cd1ab592a19e985970f3dfecfc7bf7e0d32f94b0e1d13888394b32a78b92928dbfe61f02756c8a4cfd507350709681d33000679c2071f7cdf5d946c4cad93b4be5d2322035c32747079bd8148df6fb51c59b2c8dc9a95d89f1f67a82731f4a4314b0469e500d2d6244097f5c622cae59afe0d973cffd87919b129ba16404003559a376123d72e3b153916b4fcec2c8e70f4d5904bb6d961ece44eb8037ed7fbb0de43b068360472d038f0063d3dcabc70e22c4ac84310267a06905bc53ee7dde1c7a931a30037365ded88d61b976d0c574bcf1a4ca2a2ff2e2acdedf20ae91e79bcc499eb50774232898a0c2dbcc784e6a4f18baa5036f4130e0c8430dac6e216e17c85431f6ac9644ec90f45583f56209f90d693114d65ef5989b98ec81c140a70468a439192dcb9d8d1e3d0209242ee142cc8
c9eaff29c951b9ad6318113cd2e1045493efe2f5052e6c627b3d81f6f900f39fe9912ee73056fbe8dd2fb8f1013459c515b7908c0aa9c0625a93edb04e0724c1cb7ccffc66a15bb1e7810d696fabb2a950ce6b533ec87a8840ad47898f54c2f7eade535aa6e849a8235ac42d4397f8404bf2436c0480ca205215641f186e74ed8d04ef793b0fb457133b80fbaea3ec13094d1fb98b19923a483c25bda173cd3f438804e444bed852ec3908203944c708c60fb27a3d0c742cf8b4b18f4a3f9b8dc9d87000198f30f3d57d18095c3c589d710a1277afd00376504e1b8fc021eff540fb6922658e1958b7cb79937fe00c73ba1fdaebd331e622822ff57078d509d44960375371d749951ee319c320354e4120d841a4693e461770084016733fd8525538354d9b4231300188bc0ed8a46a1db9d316b5e67e45bba09c9dc623fa7f67e229402410bdc4391b73ccc9aeb0075c1884219fc17dd662bb4f9e4069761584a0703b3ab0c334c725d46205d6b276c6442d52d3c454798277930fe086f650c4dad1cae28b6c28f16246b9a56a1cc859ee6feb13c9c3058a89ee212be9ac3b2902e1c6297aa4d3b87a884f7b57d8e4f11671c31a2d6b63d54f18b779151ee512cfaf61af6e7bc477abd0a0625d9dfff3b3ce4b403909856e31a03998fb887720d081209c7e302bc8b5382c0d47695393058831b70ffe6424e014c7d0df3efa49166d1f0906911fd83d7b3e7f3d3d6ef534b0a279ecfbe36741e668007821b0f8a157cc64b265c286f246ea6707efc51b71cff1999c6441768840eedc9e698afd3ef738f61f7027c473bca098367a84c2f97505f9fcf2a1a469bdc6aadddb78d14717bdeae17b2b497adc8c71216ddab964937540591d25aff4b7db6b16ca5250677304832ac04d46a230840124e0cb768c6b6dc58c4bce83854ff707ac8eee83d6be5c51b52584ed1b874780c7ac559be76e34f4f1e53d1465c201dec01ad0cfdf69cc6a02670397201345fb111d5f2105030b662038ac268d1fefc8d0c27b8cf8a347765ffd50a8a9c29bd83ce79b807de04abba6107a6404daf5cd8114a8a72b0a6c8675e851506df84501159647b048d8e75c157ace189ceefa88bec8cb28dda4c0bbf91dffb8e459efb43cc0bf21f2b3df6c34b5dc22c50dc6ba88f7e50616dae26727b5cd094201e45ebc9add451d19e30c37f5a2b9e4793d297d405ece2330d887c13687c0877380f6e02c2c99a988cc172937929b51caf2ffb109c03fe1a9e110829a5d3738a0b6a40f70e80c338e38faf1707bedd34ff6b80b9448ad9fcc9f7e835074ea074219a5b7b3eaf717f84e2ddf9b88403cb9d7b106ed21061c22a8e5c836bc62e8c7d7e89146e114fd224d90ce7bf98b3a3003ccf161e22a02cfe596d2592a0e6d46b549d5992c69ece3b711495577ea86caee85846800ae623419b55089c8f4fc54464da66615cf18ee2489e1e9cda06915715a1ffab9076fba7845c7e9f279e94d515816aecc7ef718f4f9b58e8c31592df2eb3cb3f18fb323504dac608e816ae11b1e23ec8aea9e5881f2658beabd1e19d2c39db6e3f3a23c22e9ad47ddd03522e87a56dc35837ec5aab583b4adc9a6ef73f9189114cdf165fd7e7cebce94e24c21fadf761531ffe3338d74d400e588178d550d9a7e27036c84487b2944ffaa83d83d2290574212a5b68c7124f4eea7cdfcf7813085221ec5e33f98fad5031cb92af92ed1a6959981630202fdd6a11ab422eeb2e3a74887f1b568510bc29bbbce4128bdbcd86e95609571d2a0ecbf5e645e61d8b42d031839356c36ce428361de5bb631e9f3f81ca7291baa8d9e2eb70e3306687735e538e54826f0f570edb3bf1082676f2bbe0a6b10a87e74008f89d906222a822bb5124e9f44f7163725f3f4f62fd2d8347f700ddef66e16c4935c50be9c22dc83a5e4fadf07dab6dc48a5531af859d5773cdc898838647b506acf3fbb9743120503383f81a3c3736d922396a3a0c9d7c31781c0d23224590e93a2a8f6bae2c48c3c1cd25a6b3331dd42e76c6639ffeadf8b30e678bc3e2dd33968e7ad457d715e8a72eb2d87c2f8e57c5238b4dd13adda2ebadadd0c773b8636ce1fa8dbf2ef01983f8aa1f59c50251f51c168225f9affdad6c0fca6d8ccf9a5dbb2765cbcf65a782b24ef18c2579ae3f1681a1615c3bb450b56570507cf0e7f3d498f52a352e0001ab58cf14266e086f68ee6c45d598c2317767616a2ea70ab2db78c01e459992e8df001f30198727dc64f12ebcb56ec1898d95fd8aa6ef0cdf0e28a8a97d376a6a399c33f80370bc0e7a2ae5502389bac840984c0e1a857455e16bd2ae312dcd6e744cf502256f549a81087d2460f90fc2c2d3947324fb4c6b01671e054cb67863ad21e7e92a2ea6984bdc72aa77daf2a84aec4bfe71deefb5cba2ff3f0d3ee71b753d025f41bfe186c41520421a304cd501da0a13c70b29c6bd621dd7e386bdb94b85959f835c06872f00a88521820f18b7
30816c9ad35bd26df1675cccdb203475b0977c01dcca429b3ef98ed14486d1de1b1504db73632b1fe5f9b569ae5ca0afe95ffffecc22b286842ae3459f889918dc79fef29080b4b197d99b7f4a21424dcdec71275863c797ee039beccb82e9f1371d493ee47c0d224b0fbaff85a0451de359696ecf2629c4ca8bc649bcb834ddcb56dcd1e65c2cf1ab916cc482dc491dc3085c662861b046ea9249a0ec0e74207fd4dc422d67f12ae32a25471c4a34abb62205d6975a8cbb63644b8993ae78419d87c95674636ca3359ea073b13842563a6af66c356945e77a289c96810bafb4f9c34915476bd4817e8a52626b63d0cd1b16424aa12d6230f76fba5414ffdac66ea7da2f6c9731179bafd4cee443dd3d5de3b58a6f02d298d1916509c58f2f9b0074795a347258bd7095d1a3b9ebb584b6984593333986b66db733920c9a0a4e80b0d3b8613903ad144359d45e22804337353e0143869dca33bcd34a7cf78e52173b7940c1b61eeefcbad300944e3538e3c343c2ec24938ec3ad6bf5b3d656ab8d34d65a55c2fac2baf5c1f54abe648dacdf7c4bd955c41a5df0660587b454b07e2e360371db3d280708d7e233d4ed743fafee63690acce3589fdf9408a6d7c9a840c9f5e37669b4640de282f3fee71474958a391282dd968985c2292d7a79dba248847ce18b4bb1ba2959c964ef80fca9fbe9b86ae2eddcfea2ea4b722d25a3618f3b3d960c2fc94efdbc71cc77adfcae58d208d604e20d8a4526aec18d8dca895c54947a25cd7598109ea4f1e1046c9e5fb5393bba376eddb3b001f08eec8f8eca6051bcf9f6e9d4459aba50c5e58fe31f3430bfe86904569965dbe476dab0c5a4d0125c15f38f319089af6bb06c3ee9943d550b04525de88a8b1787652c504ee79fb726174478887817195254853f0a8d32a37d08487eec9e3a607de4d64006abd10dcb8e065bf4f96fb027c901219df0558f6738722b2ba2f5a2651dc6259288edd3e0772d17f2139e65344dc3bb2312ae981b0d3a333f2a569f3b27dcb26b516771963e748827f649450676973de3feaeaddd91d2354304ca", 0x1000}, {&(0x7f0000002040)="0fffd1853e17205b0cd00724e90796a7d9dcc66665d29823c8932e9986501f45b94a8478ae466c42a817ac2ab8d8e703e3c73580c88afdbf0c489e35a71fb0a195fe30e775fda4103cb5f930161c65c05c08a6d014017e1e29ee598f99aeba2a8b5437be4ea0c7f10004b42b684a1fc1b437937b2f3f142faec42097994e3e9a4c6cb362ef72c864c2c3f3ba2e45023de9e3e9937ad2f0acceb22fbb0b23b35da205efde4593dd81b65cc246729883fd280e7645ac159368593a18c844eca7b51c195d1731174686266fe5fea289f19a55a9e8d54bdaf773", 0xd8}, {&(0x7f0000002140)="5d08523c8af851f0f96d045a16cff5290020a05da0b48992e89c41c65a59d2798efbd0f95a27166c19021f891ab9d93e1ae7fe1ac54d4eafbfe9a722ed3e6570e7b9f1ad6e71811c8f5c494901d72ca0111c7a4e06592de00fb2c1c94ad9319d6f54e65261de8610bafcaa164970c772f7ff0b0f319d3ff91aa0dfdf92ec3b58e37c894a8a64bd97e10eab0eaee877ec1500662567263aac0ac725a5a9fe8a49dada29d8ac8c3d93e80fa964e2a447d5be3c8a304306eacc689b2b2dffd2a076a3bb1ed4ef", 0xc5}, {&(0x7f0000002240)="49e437cbe796a2f7c9765be899ec8624240e047c46850df62822560f5daeedb24758092b87f78acaa277af9c13f405ad9b6c3c1c98e30ef971152aef1eeba390878d71f13ceb8e69dc775ba6c1ca03de229deb0da394bc9363619d8534e1bb3464f567cfccb64ecbe83e239afa7cc406c2a96a57fb45e57b33bd4c1be0eadfd79c92c91f4f0531780f4dee3f712a9494cb27775684351757f7b058a12bb9c47f478048003dd30127e7b45a5ce99e", 0xae}], 0x6, &(0x7f0000002380)=[@sndrcv={0x30, 0x84, 0x1, {0x80, 0x5, 0x820f, 0xffffffb4, 0x9, 0xc66c, 0x8, 0x6, r3}}, @sndrcv={0x30, 0x84, 0x1, {0x9, 0x4000, 0x207, 0x9, 0x0, 0x81, 0xa4e, 0x1}}, @authinfo={0x18, 0x84, 0x6, {0x101}}, @dstaddrv6={0x20, 0x84, 0x8, @private1}, @sndrcv={0x30, 0x84, 0x1, {0x4, 0xb87f, 0x8000, 0x81, 0x20, 0x400, 0x80000001, 0xe53b, r6}}, @authinfo={0x18, 0x84, 0x6, {0x6}}, @sndinfo={0x20, 0x84, 0x2, {0x2, 0x0, 0x101, 0x7fffffff}}, @dstaddrv6={0x20, 0x84, 0x8, @mcast1}, @sndrcv={0x30, 0x84, 0x1, {0x0, 0x5, 0x1, 0x85, 0x81, 0x80000001, 0x9, 0xbb}}, @authinfo={0x18, 0x84, 0x6, {0x8}}], 0x168, 0x20000000}], 0x4, 0x2400c004) (async) r7 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) 
sendfile(0xffffffffffffffff, r7, 0x0, 0x8000000000004) openat$cgroup_ro(r7, &(0x7f0000000000)='pids.current\x00', 0x0, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) (async) r8 = socket$netlink(0x10, 0x3, 0x0) (async) ioctl$sock_kcm_SIOCKCMCLONE(r7, 0x89e2, &(0x7f0000000380)={r0}) sendmsg$NFT_MSG_GETOBJ_RESET(r9, &(0x7f00000027c0)={&(0x7f0000002600)={0x10, 0x0, 0x0, 0x20000}, 0xc, &(0x7f0000002780)={&(0x7f0000002640)={0x60, 0x15, 0xa, 0x101, 0x0, 0x0, {0x2, 0x0, 0x1}, [@NFTA_OBJ_USERDATA={0x1a, 0x8, "ff9a6d06aeca0ecc6d390465fc7d5874e1fe35d40c31"}, @NFTA_OBJ_TABLE={0x9, 0x1, 'syz1\x00'}, @NFTA_OBJ_TABLE={0x9, 0x1, 'syz1\x00'}, @NFTA_OBJ_TABLE={0x9, 0x1, 'syz1\x00'}, @NFTA_OBJ_HANDLE={0xc, 0x6, 0x1, 0x0, 0x2}]}, 0x60}, 0x1, 0x0, 0x0, 0x1}, 0x40) (async) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) r10 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r10, 0x0, 0x8000000000004) syz_genetlink_get_family_id$batadv(&(0x7f00000026c0), r10) (async) sendmsg$nl_route(r8, 0x0, 0x0) [ 2171.782052][ T7213] bond1433: (slave bridge1333): making interface the new active one [ 2171.791932][ T7260] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2171.870135][ T7213] bond1433: (slave bridge1333): Enslaving as an active interface with an up link [ 2171.959945][ T7223] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 10:41:48 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xf0ffffff}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:41:48 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(0xffffffffffffffff, &(0x7f0000000080)=ANY=[@ANYBLOB="2321202e2f66696c6530202720200adb941c5a01c5b98440db3f5d2ff89a0ae2b2e8809e2556012f990209ae08a74d71fe42ccbe13ed4f35a8b37b722a569137ea59d8fc21a81923b278772c3961256c98123ef9b55b48e4c5909bc583ca86c986387eb5489837f066bb83d6034e2a91f1d6ec4b5836c9b5fc317150b7a67d76aff9a6ace29e8ade056a79987852efc5b0dfc83bed611d3fbfdbcc0338287056735b013bf0d2be3d48083e46c084c224a01a14167216c03f71625e65319a97939be26e034bffcc8a8f"], 0xd1) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f00000001c0)) (async) r1 = socket$inet(0x2, 0x4, 0x6) getsockopt$ARPT_SO_GET_INFO(r1, 0x0, 0x60, &(0x7f00000002c0)={'filter\x00', 0x0, [0x9, 0xe4d9]}, &(0x7f0000000340)=0x44) (async) r2 = accept$phonet_pipe(r0, &(0x7f0000000000), &(0x7f0000000180)=0x10) mmap(&(0x7f0000ffd000/0x1000)=nil, 0x1000, 0x2, 0x4000010, r2, 0x2d8ba000) (async) pipe(&(0x7f0000000140)={0xffffffffffffffff}) close(r3) 
(async) accept4$phonet_pipe(r3, 0x0, &(0x7f0000000380), 0x800) (async) r4 = socket$inet_sctp(0x2, 0x1, 0x84) getsockopt$inet_sctp_SCTP_MAX_BURST(r4, 0x84, 0xd, &(0x7f0000000000)=@assoc_value, &(0x7f00000000c0)=0x8) (async, rerun: 32) ioctl$FS_IOC_FSGETXATTR(r4, 0x801c581f, &(0x7f0000000240)={0x2, 0x3, 0x7, 0xffff7d8f, 0xead}) (rerun: 32) setsockopt$MRT6_ASSERT(r0, 0x29, 0xcf, &(0x7f0000000280)=0x1, 0x4) [ 2172.020679][ T7223] workqueue: Failed to create a rescuer kthread for wq "bond1398": -EINTR [ 2172.157308][ T7227] bridge1263: entered promiscuous mode [ 2172.194182][ T7227] bridge1263: entered allmulticast mode [ 2172.401282][ T7254] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2172.424123][ T7254] workqueue: Failed to create a rescuer kthread for wq "bond843": -EINTR [ 2172.577206][ T7283] 8021q: adding VLAN 0 to HW filter on device bond1434 10:41:48 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xca030000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:41:48 executing program 2: r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r1 = openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r1, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) ioctl$FS_IOC_RESVSP(r1, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) (async) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r1, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) (async, rerun: 64) write$tun(r1, 0x0, 0x0) (async, rerun: 64) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000000)='pids.current\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r2, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) pipe(&(0x7f0000000140)={0xffffffffffffffff}) close(r3) (async) openat$cgroup_ro(r3, &(0x7f0000000080)='memory.current\x00', 0x0, 0x0) 10:41:48 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = syz_init_net_socket$nl_rdma(0x10, 0x3, 0x10) sendmsg$netlink(r0, &(0x7f0000001ac0)={0x0, 0x0, &(0x7f0000000300)=[{&(0x7f0000006480)={0x14, 0x14, 0x1, 0x0, 0x0, "", [@nested={0x2}]}, 0x14}], 0x1}, 0x0) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) 
getsockopt$bt_BT_RCVMTU(0xffffffffffffffff, 0x112, 0xd, &(0x7f0000000000)=0x1, &(0x7f0000000040)=0x2) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r1 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r1, 0x0, 0x0) 10:41:48 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$FAT_IOCTL_SET_ATTRIBUTES(r0, 0x40047211, &(0x7f0000000000)=0x20) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 10:41:48 executing program 5: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) writev(r0, &(0x7f0000000340)=[{&(0x7f0000000280)="043caa07f3a3f76d858ace87d769ded7550278376e8c4f6550d9108cbf5189deea33e10b8a7224c6107d53d01f904f58c8e83e60c4192c5a7fbd1a5f9c9fb63ca8dddee8d195355273926702d073752d80d8545e667b5c485a", 0x59}, {&(0x7f0000000100)="e48c29435b087869336f3e74363df6ea882508c8aa487ff564e800ccd724f50e3896c5d551b1a2ec6bb29187299fe2e06c11dfcb2075f9a839b34a4a6b4c2bec", 0x40}, {&(0x7f0000000400)="a318ece45becd6e47ff8375b816640bce6a9ff4d8e83ab791404d351c3ba957f2f3a0220176c17e899248cf03c8a908c4ade8de15d81e79b803df97f6fe5e817c2efde3befe034a7bc6d52a0c1caf88dc7d2077e40879876ce4aa8d6273615939a97526b8cb409d0c58f8075db16b3aed80bd44efb660be17d9db07a801a43b59a034e9998a5d60b072ab0fe5dd97a86e25568ab6d9d74c7af31171f38eaff0cb0fc24300b48212ec6608ef820fc964a28db4cd8d0526d550efac04bc60f4e4cd3c224cbe0ba45f28e0566930afbca59424e39854793c6e301fdc9cedeb00500db4af5f6c2004568aa4682fc656b1943bb63", 0xf2}, {&(0x7f0000000500)="a1a8c7128cf53b860b5aaa1ccee4ffe846420e0192cfed6fed4eeddf537179b0a83627c3868f3b78b60a61263a90957f69337b2ab0a123218ed0318185135c095b602df299adfc458eef4947ab34782ecdbae8436486efca9d9076f2316af18200a3014efba92706cc1748f7c5925bdaa3aa4516b6f1a6f22f4f1c23d2554620b90ad856298d3087e4519d78d14dd2d1dbb2401cfd330b19ac2fc26b2b4837023a585b846cdc0e007773719a6ad0cf3acb3cde81220e1909741522d75d5c49222f1d0ef6d8bd1b58070df04e658553cb734adbdb98c8482330ed25a1d53b5b3a6c09aa436715f3535c84a943", 0xec}], 0x4) accept(0xffffffffffffffff, 0x0, &(0x7f0000000080)) socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x0, 0x803, 0x0) syz_genetlink_get_family_id$mptcp(&(0x7f0000000000), r1) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r3, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r5, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0x9effffff}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x3c}}, 0x0) [ 2172.695718][ T7285] bridge1334: entered promiscuous mode [ 2172.701395][ T7285] bridge1334: 
entered allmulticast mode [ 2172.706672][ T7291] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further 10:41:48 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$FAT_IOCTL_SET_ATTRIBUTES(r0, 0x40047211, &(0x7f0000000000)=0x20) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) ioctl$FAT_IOCTL_SET_ATTRIBUTES(r0, 0x40047211, &(0x7f0000000000)=0x20) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) [ 2172.795555][ T7299] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further 10:41:49 executing program 2: r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r1 = openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r1, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) ioctl$FS_IOC_RESVSP(r1, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) (async, rerun: 32) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r1, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) (async, rerun: 32) write$tun(r1, 0x0, 0x0) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000000)='pids.current\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r2, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) pipe(&(0x7f0000000140)={0xffffffffffffffff}) close(r3) (async, rerun: 32) openat$cgroup_ro(r3, &(0x7f0000000080)='memory.current\x00', 0x0, 0x0) (rerun: 32) 10:41:49 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = syz_init_net_socket$nl_rdma(0x10, 0x3, 0x10) sendmsg$netlink(r0, &(0x7f0000001ac0)={0x0, 0x0, &(0x7f0000000300)=[{&(0x7f0000006480)={0x14, 0x14, 0x1, 0x0, 0x0, "", [@nested={0x2}]}, 0x14}], 0x1}, 0x0) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) getsockopt$bt_BT_RCVMTU(0xffffffffffffffff, 0x112, 0xd, &(0x7f0000000000)=0x1, &(0x7f0000000040)=0x2) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r1 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r1, 0x0, 0x0) mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) (async) syz_init_net_socket$nl_rdma(0x10, 0x3, 0x10) (async) sendmsg$netlink(r0, &(0x7f0000001ac0)={0x0, 0x0, &(0x7f0000000300)=[{&(0x7f0000006480)={0x14, 0x14, 0x1, 0x0, 0x0, "", [@nested={0x2}]}, 0x14}], 0x1}, 0x0) (async) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) (async) accept(0xffffffffffffffff, 0x0, 0x0) (async) getsockopt$bt_BT_RCVMTU(0xffffffffffffffff, 0x112, 0xd, &(0x7f0000000000)=0x1, &(0x7f0000000040)=0x2) (async) 
syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) (async) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) (async) socket$netlink(0x10, 0x3, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) (async) sendmsg$nl_route(r1, 0x0, 0x0) (async) 10:41:49 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xf2020000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2173.068097][ T7310] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2173.146200][ T7313] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further 10:41:49 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) (async) r0 = syz_init_net_socket$nl_rdma(0x10, 0x3, 0x10) sendmsg$netlink(r0, &(0x7f0000001ac0)={0x0, 0x0, &(0x7f0000000300)=[{&(0x7f0000006480)={0x14, 0x14, 0x1, 0x0, 0x0, "", [@nested={0x2}]}, 0x14}], 0x1}, 0x0) (async) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) (async) accept(0xffffffffffffffff, 0x0, 0x0) (async) getsockopt$bt_BT_RCVMTU(0xffffffffffffffff, 0x112, 0xd, &(0x7f0000000000)=0x1, &(0x7f0000000040)=0x2) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) (async) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) (async) r1 = socket$netlink(0x10, 0x3, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r1, 0x0, 0x0) 10:41:49 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$FAT_IOCTL_SET_ATTRIBUTES(r0, 0x40047211, &(0x7f0000000000)=0x20) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2173.270966][ T7297] 8021q: adding VLAN 0 to HW filter on device bond1398 10:41:49 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xd, &(0x7f0000000000)=@framed={{0x18, 0x0, 0x0, 0x0, 0x1ff, 0x0, 0x0, 0x0, 0x3}, [@map_idx_val={0x18, 0x4, 0x6, 0x0, 0x2, 0x0, 0x0, 0x0, 0x1}, @func={0x85, 0x0, 0x1, 0x0, 0xfffffffffffffffe}, @jmp={0x5, 0x1, 0x1, 0x6, 0x6, 0xfffffffffffffef8}, @map_fd={0x18, 0xa}, @map_fd, @alu={0x7, 0x0, 0xd, 0x9, 0x1, 0xfffffffffffffff8, 0xfffffffffffffff0}, @ldst={0x1, 0x0, 0x3, 0x7, 0x7, 0x2, 0xfffffffffffffff0}]}, &(0x7f0000000080)='GPL\x00', 0x6, 0x53, &(0x7f00000006c0)=""/83, 0x40f00, 0x4, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, &(0x7f0000000180)={0x2, 0x4}, 0x8, 0x10, &(0x7f00000001c0)={0x2, 0xc, 0x8}, 0x10, 0x0, 0x0, 0x0, &(0x7f0000000200)=[0x1, 0xffffffffffffffff]}, 0x80) preadv(r0, &(0x7f00000005c0)=[{&(0x7f00000002c0)=""/110, 
0x6e}, {&(0x7f0000000400)=""/255, 0xff}, {&(0x7f0000000340)=""/93, 0x5d}, {&(0x7f0000000500)=""/182, 0xb6}], 0x4, 0x0, 0x7) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) r1 = accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r2, 0x0, 0x8000000000004) bpf$BPF_PROG_GET_FD_BY_ID(0xd, &(0x7f0000000680), 0x4) sendfile(r1, r2, &(0x7f0000000640)=0x400000008, 0x7ff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r3 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) ioctl$int_in(0xffffffffffffffff, 0x0, &(0x7f0000000600)=0x9f2) sendmsg$nl_route(r3, 0x0, 0x0) [ 2173.542295][ T7302] bond1398: (slave bridge1263): making interface the new active one [ 2173.589999][ T7302] bond1398: (slave bridge1263): Enslaving as an active interface with an up link 10:41:49 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(r0, 0x81f8943c, &(0x7f0000000080)) 10:41:49 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xd10c0000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:41:49 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r1 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r1, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) mmap(&(0x7f0000ffc000/0x1000)=nil, 0x1000, 0x1000003, 0x100010, r1, 0x9357f000) r2 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r3 = openat$cgroup_ro(r2, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r3, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) ioctl$FS_IOC_RESVSP(r3, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r3, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) write$tun(r3, 0x0, 0x0) bpf$ITER_CREATE(0x21, &(0x7f0000000000)={r3}, 0x8) 
ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) write$binfmt_script(r0, &(0x7f0000000080)={'#! ', './file0', [{0x20, 'memory.events\x00'}], 0xa, "314ccee4b7548a95a374ec63044b45b7055e5b79663c59a8315f3558731109acf0910b678c1447eb891c96284cdbdec9a106517dcd5a7c81f0bf10f5268e78b3b58df9017898a79c36d2c02a6ffe295174573345298627c16b0e063891f75e107153966392c03fe79a2a0695f5aae209c740f4327a409600be77b8613a5d6be41e9dc7549a12faadbd949939545f415e350d28c74bb512180fce953dad86096fabfbf7bbd62f7987b90407f0d0f02d2d983713b2c697a7e928354399e9b88bd1abfaf7190b78603192c4f03bcee07145cd0c2dc38b9ce5d1e4a68c911889b45dc427fc41ae16aaca5e14e79570c214fa"}, 0x10a) r4 = accept4$nfc_llcp(r3, 0x0, &(0x7f00000002c0), 0x80800) r5 = openat$tun(0xffffffffffffff9c, &(0x7f0000000080), 0x2, 0x0) r6 = socket$nl_route(0x10, 0x3, 0x0) r7 = socket$inet6_sctp(0xa, 0x5, 0x84) sendmmsg$inet6(r7, &(0x7f0000005900)=[{{&(0x7f0000000180)={0xa, 0x0, 0x0, @private1}, 0x1c, &(0x7f0000001680)=[{&(0x7f00000001c0)="1a", 0x1}], 0x1}}, {{&(0x7f0000002c80)={0xa, 0x0, 0x0, @ipv4={'\x00', '\xff\xff', @private=0xa010101}}, 0x1c, &(0x7f0000004180)=[{&(0x7f0000002d00)="92", 0x1}], 0x1}}], 0x2, 0x4000040) ioctl$BTRFS_IOC_BALANCE_PROGRESS(0xffffffffffffffff, 0x84009422, &(0x7f0000001740)={0x0, 0x0, {0x0, @usage, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, @struct}, {0x0, @usage, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, @struct}, {0x0, @struct}}) ioctl$BTRFS_IOC_SCRUB_PROGRESS(r7, 0xc400941d, &(0x7f00000007c0)={r8, 0x6, 0x6}) ioctl$BTRFS_IOC_SCRUB(r6, 0xc400941b, &(0x7f0000000940)={r8, 0x3f, 0x1, 0x1}) ioctl$BTRFS_IOC_DEV_INFO(r5, 0xd000941e, &(0x7f00000004c0)={r8, "57149989cf1136de6b93f2f3e5ead599"}) ioctl$BTRFS_IOC_SCRUB_PROGRESS(r2, 0xc400941d, &(0x7f0000000400)={r8, 0x1, 0x6, 0x1}) r9 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r9, 0x0, 0x8000000000004) ioctl$F2FS_IOC_MOVE_RANGE(r4, 0xc020f509, &(0x7f00000003c0)={r9, 0x0, 0x7, 0x2}) 10:41:49 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) (async) r0 = bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xd, &(0x7f0000000000)=@framed={{0x18, 0x0, 0x0, 0x0, 0x1ff, 0x0, 0x0, 0x0, 0x3}, [@map_idx_val={0x18, 0x4, 0x6, 0x0, 0x2, 0x0, 0x0, 0x0, 0x1}, @func={0x85, 0x0, 0x1, 0x0, 0xfffffffffffffffe}, @jmp={0x5, 0x1, 0x1, 0x6, 0x6, 0xfffffffffffffef8}, @map_fd={0x18, 0xa}, @map_fd, @alu={0x7, 0x0, 0xd, 0x9, 0x1, 0xfffffffffffffff8, 0xfffffffffffffff0}, @ldst={0x1, 0x0, 0x3, 0x7, 0x7, 0x2, 0xfffffffffffffff0}]}, &(0x7f0000000080)='GPL\x00', 0x6, 0x53, &(0x7f00000006c0)=""/83, 0x40f00, 0x4, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, &(0x7f0000000180)={0x2, 0x4}, 0x8, 0x10, &(0x7f00000001c0)={0x2, 0xc, 0x8}, 0x10, 0x0, 0x0, 0x0, &(0x7f0000000200)=[0x1, 0xffffffffffffffff]}, 0x80) preadv(r0, &(0x7f00000005c0)=[{&(0x7f00000002c0)=""/110, 0x6e}, {&(0x7f0000000400)=""/255, 0xff}, {&(0x7f0000000340)=""/93, 0x5d}, {&(0x7f0000000500)=""/182, 0xb6}], 0x4, 0x0, 0x7) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) (async, rerun: 64) r1 = accept(0xffffffffffffffff, 0x0, 0x0) (async, rerun: 64) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) (async) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r2, 0x0, 0x8000000000004) (async, rerun: 64) bpf$BPF_PROG_GET_FD_BY_ID(0xd, &(0x7f0000000680), 0x4) (async, rerun: 64) sendfile(r1, r2, &(0x7f0000000640)=0x400000008, 0x7ff) 
syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) (async) r3 = socket$netlink(0x10, 0x3, 0x0) (async, rerun: 32) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) (rerun: 32) ioctl$int_in(0xffffffffffffffff, 0x0, &(0x7f0000000600)=0x9f2) sendmsg$nl_route(r3, 0x0, 0x0) [ 2173.649535][ T7306] workqueue: Failed to create a rescuer kthread for wq "bond843": -EINTR [ 2173.783238][ T7350] EXT4-fs warning: 2 callbacks suppressed [ 2173.783255][ T7350] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2174.037705][ T7326] validate_nla: 3 callbacks suppressed [ 2174.037718][ T7326] netlink: 'syz-executor.3': attribute type 1 has an invalid length. 10:41:50 executing program 5: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) writev(r0, &(0x7f0000000340)=[{&(0x7f0000000280)="043caa07f3a3f76d858ace87d769ded7550278376e8c4f6550d9108cbf5189deea33e10b8a7224c6107d53d01f904f58c8e83e60c4192c5a7fbd1a5f9c9fb63ca8dddee8d195355273926702d073752d80d8545e667b5c485a", 0x59}, {&(0x7f0000000100)="e48c29435b087869336f3e74363df6ea882508c8aa487ff564e800ccd724f50e3896c5d551b1a2ec6bb29187299fe2e06c11dfcb2075f9a839b34a4a6b4c2bec", 0x40}, {&(0x7f0000000400)="a318ece45becd6e47ff8375b816640bce6a9ff4d8e83ab791404d351c3ba957f2f3a0220176c17e899248cf03c8a908c4ade8de15d81e79b803df97f6fe5e817c2efde3befe034a7bc6d52a0c1caf88dc7d2077e40879876ce4aa8d6273615939a97526b8cb409d0c58f8075db16b3aed80bd44efb660be17d9db07a801a43b59a034e9998a5d60b072ab0fe5dd97a86e25568ab6d9d74c7af31171f38eaff0cb0fc24300b48212ec6608ef820fc964a28db4cd8d0526d550efac04bc60f4e4cd3c224cbe0ba45f28e0566930afbca59424e39854793c6e301fdc9cedeb00500db4af5f6c2004568aa4682fc656b1943bb63", 0xf2}, {&(0x7f0000000500)="a1a8c7128cf53b860b5aaa1ccee4ffe846420e0192cfed6fed4eeddf537179b0a83627c3868f3b78b60a61263a90957f69337b2ab0a123218ed0318185135c095b602df299adfc458eef4947ab34782ecdbae8436486efca9d9076f2316af18200a3014efba92706cc1748f7c5925bdaa3aa4516b6f1a6f22f4f1c23d2554620b90ad856298d3087e4519d78d14dd2d1dbb2401cfd330b19ac2fc26b2b4837023a585b846cdc0e007773719a6ad0cf3acb3cde81220e1909741522d75d5c49222f1d0ef6d8bd1b58070df04e658553cb734adbdb98c8482330ed25a1d53b5b3a6c09aa436715f3535c84a943", 0xec}], 0x4) accept(0xffffffffffffffff, 0x0, &(0x7f0000000080)) socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x0, 0x803, 0x0) syz_genetlink_get_family_id$mptcp(&(0x7f0000000000), r1) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r3, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r5, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0xa2030000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x3c}}, 0x0) 
10:41:50 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(r0, 0x81f8943c, &(0x7f0000000080)) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(r0, 0x81f8943c, &(0x7f0000000080)) (async) 10:41:50 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) (async) r0 = bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xd, &(0x7f0000000000)=@framed={{0x18, 0x0, 0x0, 0x0, 0x1ff, 0x0, 0x0, 0x0, 0x3}, [@map_idx_val={0x18, 0x4, 0x6, 0x0, 0x2, 0x0, 0x0, 0x0, 0x1}, @func={0x85, 0x0, 0x1, 0x0, 0xfffffffffffffffe}, @jmp={0x5, 0x1, 0x1, 0x6, 0x6, 0xfffffffffffffef8}, @map_fd={0x18, 0xa}, @map_fd, @alu={0x7, 0x0, 0xd, 0x9, 0x1, 0xfffffffffffffff8, 0xfffffffffffffff0}, @ldst={0x1, 0x0, 0x3, 0x7, 0x7, 0x2, 0xfffffffffffffff0}]}, &(0x7f0000000080)='GPL\x00', 0x6, 0x53, &(0x7f00000006c0)=""/83, 0x40f00, 0x4, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, &(0x7f0000000180)={0x2, 0x4}, 0x8, 0x10, &(0x7f00000001c0)={0x2, 0xc, 0x8}, 0x10, 0x0, 0x0, 0x0, &(0x7f0000000200)=[0x1, 0xffffffffffffffff]}, 0x80) preadv(r0, &(0x7f00000005c0)=[{&(0x7f00000002c0)=""/110, 0x6e}, {&(0x7f0000000400)=""/255, 0xff}, {&(0x7f0000000340)=""/93, 0x5d}, {&(0x7f0000000500)=""/182, 0xb6}], 0x4, 0x0, 0x7) (async) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) r1 = accept(0xffffffffffffffff, 0x0, 0x0) (async) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r2, 0x0, 0x8000000000004) (async) bpf$BPF_PROG_GET_FD_BY_ID(0xd, &(0x7f0000000680), 0x4) (async) sendfile(r1, r2, &(0x7f0000000640)=0x400000008, 0x7ff) (async) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r3 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) ioctl$int_in(0xffffffffffffffff, 0x0, &(0x7f0000000600)=0x9f2) sendmsg$nl_route(r3, 0x0, 0x0) [ 2174.077646][ T7326] workqueue: Failed to create a rescuer kthread for wq "bond1435": -EINTR [ 2174.185151][ T7365] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further 10:41:50 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xf2030000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:41:50 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 
0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(r0, 0x81f8943c, &(0x7f0000000080)) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(r0, 0x81f8943c, &(0x7f0000000080)) (async) [ 2174.319463][ T7356] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 10:41:50 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) openat$cgroup_pressure(r0, &(0x7f0000000000)='cpu.pressure\x00', 0x2, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r1, 0x0, 0x0) [ 2174.476069][ T7356] 8021q: adding VLAN 0 to HW filter on device bond1399 [ 2174.511155][ T7386] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further 10:41:50 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xd20b0000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2174.531378][ T7361] bond1399: (slave bridge1264): making interface the new active one [ 2174.545477][ T7361] bond1399: (slave bridge1264): Enslaving as an active interface with an up link [ 2174.558236][ T7373] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
[ 2174.633972][ T7388] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further 10:41:50 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) (async) mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) openat$cgroup_pressure(r0, &(0x7f0000000000)='cpu.pressure\x00', 0x2, 0x0) (async) openat$cgroup_pressure(r0, &(0x7f0000000000)='cpu.pressure\x00', 0x2, 0x0) socket$netlink(0x10, 0x3, 0x0) (async) r1 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r1, 0x0, 0x0) 10:41:50 executing program 4: r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r1 = openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r1, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) ioctl$FS_IOC_RESVSP(r1, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r1, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) write$tun(r1, 0x0, 0x0) ioctl$F2FS_IOC_MOVE_RANGE(r1, 0xc020f509, &(0x7f0000000080)={r1, 0x9, 0x7ff, 0x1}) r3 = openat$cgroup_ro(r2, &(0x7f0000000000)='cgroup.freeze\x00', 0x275a, 0x0) r4 = openat$cgroup_freezer_state(r0, &(0x7f00000000c0), 0x2, 0x0) pwritev(r4, &(0x7f0000000680)=[{&(0x7f0000000100)="dfb8341ce1e2c2c2e7bd348c9192181403c8ccea500f948623f038b60edb1a866e08400a6ad867736d40af1670cf8aba84afa16eaec9b2739b6578544b8d01fc6be3666e027a920aa11ddf0275928ebd59c49973bc11c53baf4fbfa846ccd7ad4846b628", 0x64}, {&(0x7f0000000180)}, {&(0x7f00000003c0)="1e1da60d744fb9bffa4a106689fb3234b80e437d8632f5052ab1f61839b2cfe218e4df16a99f36473b2c0477d3a4391797e125e003faff5032f25b2f95076fb7ffed95ef8968ef3269a1a2fd7e0f020ce9800f9b4c0da3f27eccd0161e91f1b44da6913359011f84410ff3b7b3b2e18837dc56b061f8d0935373f09be4fc7282775d9b9edba761f20429d9187a1f457385fc288950aa67e956000d40ec9ad60bc67a7816465a389502b5303dc9a7e7e4fdec748fecdbf8017ff62be48349d171d65d3975c74fc73c5a1cccf7eff4d5f60c170f", 0xd3}, {&(0x7f00000002c0)="35e41404545d43", 0x7}, {&(0x7f00000004c0)="0476af5afc1867fa8a114bce63d7bd9076cae7b41daebfc03216062f686944f0d7d3ff00a7ffd43766eb6aaecfd6688cbdbc8d4b145755504e", 0x39}, 
{&(0x7f0000000500)="fc5cccf83ac3595aa7f49a59e3787d83b5ddd7f0600c9eb02e1ca2586927bd097de7c84abb34d496a962db5383825424dd8649a93a146c1b3dbef05640966a32ae51a3bb853764c803b64e51dfd85bf45ee379ab56a841c8ad4cc5adb7c7918999413366e64a06e24e7f37195d0f2e1e59421b65dfb359e7b7d8ecdb3fce47638e29a8a6decf7f6631ff7f463391c613883094a9e2a48a3d46a4267eaee6b98a31bbd39fa1b057a9bf0291e4d203991a52a2a89eb3f95196b39f78f6ef3d7cac070ac75814a8de28f9174e1537c2d1e820df", 0xd2}, {&(0x7f0000000600)="1f2c27bedbd19b176bf30e914a95f21c075c00c821768caa83259bb68c597ca7e6ae198716071541d0c3d2213c1617a22e8c17069907f6befcae904698c1444d86515a5613d801c7e5f3108e57fab6f3", 0x50}], 0x7, 0x10000, 0x4) ioctl$EXT4_IOC_GROUP_EXTEND(r3, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2174.676674][ T7373] 8021q: adding VLAN 0 to HW filter on device bond843 10:41:50 executing program 4: r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r1 = openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r1, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) ioctl$FS_IOC_RESVSP(r1, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r1, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) write$tun(r1, 0x0, 0x0) ioctl$F2FS_IOC_MOVE_RANGE(r1, 0xc020f509, &(0x7f0000000080)={r1, 0x9, 0x7ff, 0x1}) r3 = openat$cgroup_ro(r2, &(0x7f0000000000)='cgroup.freeze\x00', 0x275a, 0x0) r4 = openat$cgroup_freezer_state(r0, &(0x7f00000000c0), 0x2, 0x0) pwritev(r4, &(0x7f0000000680)=[{&(0x7f0000000100)="dfb8341ce1e2c2c2e7bd348c9192181403c8ccea500f948623f038b60edb1a866e08400a6ad867736d40af1670cf8aba84afa16eaec9b2739b6578544b8d01fc6be3666e027a920aa11ddf0275928ebd59c49973bc11c53baf4fbfa846ccd7ad4846b628", 0x64}, {&(0x7f0000000180)}, {&(0x7f00000003c0)="1e1da60d744fb9bffa4a106689fb3234b80e437d8632f5052ab1f61839b2cfe218e4df16a99f36473b2c0477d3a4391797e125e003faff5032f25b2f95076fb7ffed95ef8968ef3269a1a2fd7e0f020ce9800f9b4c0da3f27eccd0161e91f1b44da6913359011f84410ff3b7b3b2e18837dc56b061f8d0935373f09be4fc7282775d9b9edba761f20429d9187a1f457385fc288950aa67e956000d40ec9ad60bc67a7816465a389502b5303dc9a7e7e4fdec748fecdbf8017ff62be48349d171d65d3975c74fc73c5a1cccf7eff4d5f60c170f", 0xd3}, {&(0x7f00000002c0)="35e41404545d43", 0x7}, {&(0x7f00000004c0)="0476af5afc1867fa8a114bce63d7bd9076cae7b41daebfc03216062f686944f0d7d3ff00a7ffd43766eb6aaecfd6688cbdbc8d4b145755504e", 0x39}, {&(0x7f0000000500)="fc5cccf83ac3595aa7f49a59e3787d83b5ddd7f0600c9eb02e1ca2586927bd097de7c84abb34d496a962db5383825424dd8649a93a146c1b3dbef05640966a32ae51a3bb853764c803b64e51dfd85bf45ee379ab56a841c8ad4cc5adb7c7918999413366e64a06e24e7f37195d0f2e1e59421b65dfb359e7b7d8ecdb3fce47638e29a8a6decf7f6631ff7f463391c613883094a9e2a48a3d46a4267eaee6b98a31bbd39fa1b057a9bf0291e4d203991a52a2a89eb3f95196b39f78f6ef3d7cac070ac75814a8de28f9174e1537c2d1e820df", 0xd2}, 
{&(0x7f0000000600)="1f2c27bedbd19b176bf30e914a95f21c075c00c821768caa83259bb68c597ca7e6ae198716071541d0c3d2213c1617a22e8c17069907f6befcae904698c1444d86515a5613d801c7e5f3108e57fab6f3", 0x50}], 0x7, 0x10000, 0x4) ioctl$EXT4_IOC_GROUP_EXTEND(r3, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) (async) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) (async) ioctl$FS_IOC_RESVSP(r1, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) (async) ioctl$FS_IOC_RESVSP(r1, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) (async) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r1, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) (async) write$tun(r1, 0x0, 0x0) (async) ioctl$F2FS_IOC_MOVE_RANGE(r1, 0xc020f509, &(0x7f0000000080)={r1, 0x9, 0x7ff, 0x1}) (async) openat$cgroup_ro(r2, &(0x7f0000000000)='cgroup.freeze\x00', 0x275a, 0x0) (async) openat$cgroup_freezer_state(r0, &(0x7f00000000c0), 0x2, 0x0) (async) pwritev(r4, &(0x7f0000000680)=[{&(0x7f0000000100)="dfb8341ce1e2c2c2e7bd348c9192181403c8ccea500f948623f038b60edb1a866e08400a6ad867736d40af1670cf8aba84afa16eaec9b2739b6578544b8d01fc6be3666e027a920aa11ddf0275928ebd59c49973bc11c53baf4fbfa846ccd7ad4846b628", 0x64}, {&(0x7f0000000180)}, {&(0x7f00000003c0)="1e1da60d744fb9bffa4a106689fb3234b80e437d8632f5052ab1f61839b2cfe218e4df16a99f36473b2c0477d3a4391797e125e003faff5032f25b2f95076fb7ffed95ef8968ef3269a1a2fd7e0f020ce9800f9b4c0da3f27eccd0161e91f1b44da6913359011f84410ff3b7b3b2e18837dc56b061f8d0935373f09be4fc7282775d9b9edba761f20429d9187a1f457385fc288950aa67e956000d40ec9ad60bc67a7816465a389502b5303dc9a7e7e4fdec748fecdbf8017ff62be48349d171d65d3975c74fc73c5a1cccf7eff4d5f60c170f", 0xd3}, {&(0x7f00000002c0)="35e41404545d43", 0x7}, {&(0x7f00000004c0)="0476af5afc1867fa8a114bce63d7bd9076cae7b41daebfc03216062f686944f0d7d3ff00a7ffd43766eb6aaecfd6688cbdbc8d4b145755504e", 0x39}, {&(0x7f0000000500)="fc5cccf83ac3595aa7f49a59e3787d83b5ddd7f0600c9eb02e1ca2586927bd097de7c84abb34d496a962db5383825424dd8649a93a146c1b3dbef05640966a32ae51a3bb853764c803b64e51dfd85bf45ee379ab56a841c8ad4cc5adb7c7918999413366e64a06e24e7f37195d0f2e1e59421b65dfb359e7b7d8ecdb3fce47638e29a8a6decf7f6631ff7f463391c613883094a9e2a48a3d46a4267eaee6b98a31bbd39fa1b057a9bf0291e4d203991a52a2a89eb3f95196b39f78f6ef3d7cac070ac75814a8de28f9174e1537c2d1e820df", 0xd2}, {&(0x7f0000000600)="1f2c27bedbd19b176bf30e914a95f21c075c00c821768caa83259bb68c597ca7e6ae198716071541d0c3d2213c1617a22e8c17069907f6befcae904698c1444d86515a5613d801c7e5f3108e57fab6f3", 0x50}], 0x7, 0x10000, 0x4) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r3, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) [ 2174.856682][ T7379] bond843: (slave bridge1000): making interface the new active one [ 2174.917371][ T7379] bond843: (slave bridge1000): Enslaving as an active interface with an up link [ 2174.960031][ T7383] netlink: 'syz-executor.3': attribute type 1 has an invalid length. 
[ 2175.110404][ T7383] 8021q: adding VLAN 0 to HW filter on device bond1435 [ 2175.153869][ T7385] bond1435: (slave bridge1334): making interface the new active one [ 2175.168127][ T7385] bond1435: (slave bridge1334): Enslaving as an active interface with an up link 10:41:51 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) r1 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r1, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) (async) mmap(&(0x7f0000ffc000/0x1000)=nil, 0x1000, 0x1000003, 0x100010, r1, 0x9357f000) (async) r2 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r3 = openat$cgroup_ro(r2, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r3, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) (async) ioctl$FS_IOC_RESVSP(r3, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r3, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) write$tun(r3, 0x0, 0x0) (async) bpf$ITER_CREATE(0x21, &(0x7f0000000000)={r3}, 0x8) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) write$binfmt_script(r0, &(0x7f0000000080)={'#! 
', './file0', [{0x20, 'memory.events\x00'}], 0xa, "314ccee4b7548a95a374ec63044b45b7055e5b79663c59a8315f3558731109acf0910b678c1447eb891c96284cdbdec9a106517dcd5a7c81f0bf10f5268e78b3b58df9017898a79c36d2c02a6ffe295174573345298627c16b0e063891f75e107153966392c03fe79a2a0695f5aae209c740f4327a409600be77b8613a5d6be41e9dc7549a12faadbd949939545f415e350d28c74bb512180fce953dad86096fabfbf7bbd62f7987b90407f0d0f02d2d983713b2c697a7e928354399e9b88bd1abfaf7190b78603192c4f03bcee07145cd0c2dc38b9ce5d1e4a68c911889b45dc427fc41ae16aaca5e14e79570c214fa"}, 0x10a) (async) r4 = accept4$nfc_llcp(r3, 0x0, &(0x7f00000002c0), 0x80800) r5 = openat$tun(0xffffffffffffff9c, &(0x7f0000000080), 0x2, 0x0) (async) r6 = socket$nl_route(0x10, 0x3, 0x0) r7 = socket$inet6_sctp(0xa, 0x5, 0x84) sendmmsg$inet6(r7, &(0x7f0000005900)=[{{&(0x7f0000000180)={0xa, 0x0, 0x0, @private1}, 0x1c, &(0x7f0000001680)=[{&(0x7f00000001c0)="1a", 0x1}], 0x1}}, {{&(0x7f0000002c80)={0xa, 0x0, 0x0, @ipv4={'\x00', '\xff\xff', @private=0xa010101}}, 0x1c, &(0x7f0000004180)=[{&(0x7f0000002d00)="92", 0x1}], 0x1}}], 0x2, 0x4000040) ioctl$BTRFS_IOC_BALANCE_PROGRESS(0xffffffffffffffff, 0x84009422, &(0x7f0000001740)={0x0, 0x0, {0x0, @usage, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, @struct}, {0x0, @usage, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, @struct}, {0x0, @struct}}) ioctl$BTRFS_IOC_SCRUB_PROGRESS(r7, 0xc400941d, &(0x7f00000007c0)={r8, 0x6, 0x6}) (async) ioctl$BTRFS_IOC_SCRUB(r6, 0xc400941b, &(0x7f0000000940)={r8, 0x3f, 0x1, 0x1}) (async) ioctl$BTRFS_IOC_DEV_INFO(r5, 0xd000941e, &(0x7f00000004c0)={r8, "57149989cf1136de6b93f2f3e5ead599"}) ioctl$BTRFS_IOC_SCRUB_PROGRESS(r2, 0xc400941d, &(0x7f0000000400)={r8, 0x1, 0x6, 0x1}) (async) r9 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r9, 0x0, 0x8000000000004) (async) ioctl$F2FS_IOC_MOVE_RANGE(r4, 0xc020f509, &(0x7f00000003c0)={r9, 0x0, 0x7, 0x2}) 10:41:51 executing program 5: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) writev(r0, &(0x7f0000000340)=[{&(0x7f0000000280)="043caa07f3a3f76d858ace87d769ded7550278376e8c4f6550d9108cbf5189deea33e10b8a7224c6107d53d01f904f58c8e83e60c4192c5a7fbd1a5f9c9fb63ca8dddee8d195355273926702d073752d80d8545e667b5c485a", 0x59}, {&(0x7f0000000100)="e48c29435b087869336f3e74363df6ea882508c8aa487ff564e800ccd724f50e3896c5d551b1a2ec6bb29187299fe2e06c11dfcb2075f9a839b34a4a6b4c2bec", 0x40}, {&(0x7f0000000400)="a318ece45becd6e47ff8375b816640bce6a9ff4d8e83ab791404d351c3ba957f2f3a0220176c17e899248cf03c8a908c4ade8de15d81e79b803df97f6fe5e817c2efde3befe034a7bc6d52a0c1caf88dc7d2077e40879876ce4aa8d6273615939a97526b8cb409d0c58f8075db16b3aed80bd44efb660be17d9db07a801a43b59a034e9998a5d60b072ab0fe5dd97a86e25568ab6d9d74c7af31171f38eaff0cb0fc24300b48212ec6608ef820fc964a28db4cd8d0526d550efac04bc60f4e4cd3c224cbe0ba45f28e0566930afbca59424e39854793c6e301fdc9cedeb00500db4af5f6c2004568aa4682fc656b1943bb63", 0xf2}, 
{&(0x7f0000000500)="a1a8c7128cf53b860b5aaa1ccee4ffe846420e0192cfed6fed4eeddf537179b0a83627c3868f3b78b60a61263a90957f69337b2ab0a123218ed0318185135c095b602df299adfc458eef4947ab34782ecdbae8436486efca9d9076f2316af18200a3014efba92706cc1748f7c5925bdaa3aa4516b6f1a6f22f4f1c23d2554620b90ad856298d3087e4519d78d14dd2d1dbb2401cfd330b19ac2fc26b2b4837023a585b846cdc0e007773719a6ad0cf3acb3cde81220e1909741522d75d5c49222f1d0ef6d8bd1b58070df04e658553cb734adbdb98c8482330ed25a1d53b5b3a6c09aa436715f3535c84a943", 0xec}], 0x4) accept(0xffffffffffffffff, 0x0, &(0x7f0000000080)) socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x0, 0x803, 0x0) syz_genetlink_get_family_id$mptcp(&(0x7f0000000000), r1) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r3, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r5, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0xb2030000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x3c}}, 0x0) 10:41:51 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xf8030000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:41:51 executing program 4: r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r1 = openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r1, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) ioctl$FS_IOC_RESVSP(r1, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) (async) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r1, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) (async) write$tun(r1, 0x0, 0x0) (async, rerun: 32) ioctl$F2FS_IOC_MOVE_RANGE(r1, 
0xc020f509, &(0x7f0000000080)={r1, 0x9, 0x7ff, 0x1}) (rerun: 32) r3 = openat$cgroup_ro(r2, &(0x7f0000000000)='cgroup.freeze\x00', 0x275a, 0x0) r4 = openat$cgroup_freezer_state(r0, &(0x7f00000000c0), 0x2, 0x0) pwritev(r4, &(0x7f0000000680)=[{&(0x7f0000000100)="dfb8341ce1e2c2c2e7bd348c9192181403c8ccea500f948623f038b60edb1a866e08400a6ad867736d40af1670cf8aba84afa16eaec9b2739b6578544b8d01fc6be3666e027a920aa11ddf0275928ebd59c49973bc11c53baf4fbfa846ccd7ad4846b628", 0x64}, {&(0x7f0000000180)}, {&(0x7f00000003c0)="1e1da60d744fb9bffa4a106689fb3234b80e437d8632f5052ab1f61839b2cfe218e4df16a99f36473b2c0477d3a4391797e125e003faff5032f25b2f95076fb7ffed95ef8968ef3269a1a2fd7e0f020ce9800f9b4c0da3f27eccd0161e91f1b44da6913359011f84410ff3b7b3b2e18837dc56b061f8d0935373f09be4fc7282775d9b9edba761f20429d9187a1f457385fc288950aa67e956000d40ec9ad60bc67a7816465a389502b5303dc9a7e7e4fdec748fecdbf8017ff62be48349d171d65d3975c74fc73c5a1cccf7eff4d5f60c170f", 0xd3}, {&(0x7f00000002c0)="35e41404545d43", 0x7}, {&(0x7f00000004c0)="0476af5afc1867fa8a114bce63d7bd9076cae7b41daebfc03216062f686944f0d7d3ff00a7ffd43766eb6aaecfd6688cbdbc8d4b145755504e", 0x39}, {&(0x7f0000000500)="fc5cccf83ac3595aa7f49a59e3787d83b5ddd7f0600c9eb02e1ca2586927bd097de7c84abb34d496a962db5383825424dd8649a93a146c1b3dbef05640966a32ae51a3bb853764c803b64e51dfd85bf45ee379ab56a841c8ad4cc5adb7c7918999413366e64a06e24e7f37195d0f2e1e59421b65dfb359e7b7d8ecdb3fce47638e29a8a6decf7f6631ff7f463391c613883094a9e2a48a3d46a4267eaee6b98a31bbd39fa1b057a9bf0291e4d203991a52a2a89eb3f95196b39f78f6ef3d7cac070ac75814a8de28f9174e1537c2d1e820df", 0xd2}, {&(0x7f0000000600)="1f2c27bedbd19b176bf30e914a95f21c075c00c821768caa83259bb68c597ca7e6ae198716071541d0c3d2213c1617a22e8c17069907f6befcae904698c1444d86515a5613d801c7e5f3108e57fab6f3", 0x50}], 0x7, 0x10000, 0x4) ioctl$EXT4_IOC_GROUP_EXTEND(r3, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 10:41:51 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) (async) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) (async, rerun: 64) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) (rerun: 64) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) (async) openat$cgroup_pressure(r0, &(0x7f0000000000)='cpu.pressure\x00', 0x2, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r1, 0x0, 0x0) 10:41:51 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r1, 0x0, 0x8000000000004) openat$cgroup_ro(r1, &(0x7f0000000080)='pids.events\x00', 0x0, 0x0) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f0000000400)={'#! 
', './file0', [{}], 0xa, "20f3290596cfa4c8507c30bdb9f66e39476c63820094d8b0dab33251f054d80afca6571ab990b1928494dae7f6812b2b68849bde389d98309b5f0801b41238a9eff9c8f0d0a0d5b0ee09f05bd09fb305c6415dd00c87f457b48bf48637d14e2587c6538ac422bd7aea881960177045a61237cb57"}, 0x80) sendfile(0xffffffffffffffff, r2, 0x0, 0x8000000000004) bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f00000002c0)={0x18, 0xc, &(0x7f0000000340)=ANY=[@ANYBLOB="18000000050000000000000003000000185200000d00000000000000000000003f691800fcffffff186b00000100000000000000050000001835000002000000000000000000000018250000", @ANYRES32=r0, @ANYBLOB="00000000000800009500000000000000c470a46b4120ef6084910ce0c5eeb8eb153fef97de276d54a125e5491d806acc70ed9fa8880db3f96db2205783ede74e56db"], &(0x7f0000000000)='GPL\x00', 0x5, 0xf8, &(0x7f0000000100)=""/248, 0x41000, 0x2, '\x00', 0x0, 0x0, r0, 0x8, &(0x7f0000000200)={0x1, 0x2}, 0x8, 0x10, &(0x7f0000000240)={0x1, 0x3, 0x3ff}, 0x10, 0x0, 0x0, 0x0, &(0x7f0000000280)=[r2]}, 0x80) [ 2175.345389][ T7395] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 2175.445920][ T7434] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further 10:41:51 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r1, 0x0, 0x8000000000004) openat$cgroup_ro(r1, &(0x7f0000000080)='pids.events\x00', 0x0, 0x0) (async) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f0000000400)={'#! ', './file0', [{}], 0xa, "20f3290596cfa4c8507c30bdb9f66e39476c63820094d8b0dab33251f054d80afca6571ab990b1928494dae7f6812b2b68849bde389d98309b5f0801b41238a9eff9c8f0d0a0d5b0ee09f05bd09fb305c6415dd00c87f457b48bf48637d14e2587c6538ac422bd7aea881960177045a61237cb57"}, 0x80) (async) sendfile(0xffffffffffffffff, r2, 0x0, 0x8000000000004) (async) bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f00000002c0)={0x18, 0xc, &(0x7f0000000340)=ANY=[@ANYBLOB="18000000050000000000000003000000185200000d00000000000000000000003f691800fcffffff186b00000100000000000000050000001835000002000000000000000000000018250000", @ANYRES32=r0, @ANYBLOB="00000000000800009500000000000000c470a46b4120ef6084910ce0c5eeb8eb153fef97de276d54a125e5491d806acc70ed9fa8880db3f96db2205783ede74e56db"], &(0x7f0000000000)='GPL\x00', 0x5, 0xf8, &(0x7f0000000100)=""/248, 0x41000, 0x2, '\x00', 0x0, 0x0, r0, 0x8, &(0x7f0000000200)={0x1, 0x2}, 0x8, 0x10, &(0x7f0000000240)={0x1, 0x3, 0x3ff}, 0x10, 0x0, 0x0, 0x0, &(0x7f0000000280)=[r2]}, 0x80) [ 2175.549469][ T7395] 8021q: adding VLAN 0 to HW filter on device bond1400 10:41:51 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r0 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) ioctl$F2FS_IOC_MOVE_RANGE(r0, 0xc020f509, &(0x7f0000000000)={r0, 0x397aeaf0, 0x0, 0xab2}) sendmsg$RDMA_NLDEV_CMD_NEWLINK(r1, &(0x7f00000001c0)={&(0x7f0000000040)={0x10, 0x0, 0x0, 0x4000000}, 0xc, &(0x7f0000000180)={&(0x7f0000000080)={0x88, 0x1403, 0x0, 
0x70bd29, 0x25dfdbfc, "", [{{0x9, 0x2, 'syz0\x00'}, {0x8, 0x41, 'siw\x00'}, {0x14, 0x33, 'pim6reg\x00'}}, {{0x9, 0x2, 'syz0\x00'}, {0x8, 0x41, 'rxe\x00'}, {0x14, 0x33, 'bridge_slave_1\x00'}}, {{0x9, 0x2, 'syz2\x00'}, {0x8, 0x41, 'siw\x00'}, {0x14, 0x33, 'veth0_to_team\x00'}}]}, 0x88}, 0x1, 0x0, 0x0, 0x40}, 0x0) sendmsg$nl_route(r0, 0x0, 0x0) [ 2175.639505][ T7446] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further 10:41:51 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xd30b0000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:41:51 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r1, 0x0, 0x8000000000004) openat$cgroup_ro(r1, &(0x7f0000000080)='pids.events\x00', 0x0, 0x0) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async, rerun: 64) write$binfmt_script(r0, &(0x7f0000000400)={'#! ', './file0', [{}], 0xa, "20f3290596cfa4c8507c30bdb9f66e39476c63820094d8b0dab33251f054d80afca6571ab990b1928494dae7f6812b2b68849bde389d98309b5f0801b41238a9eff9c8f0d0a0d5b0ee09f05bd09fb305c6415dd00c87f457b48bf48637d14e2587c6538ac422bd7aea881960177045a61237cb57"}, 0x80) (rerun: 64) sendfile(0xffffffffffffffff, r2, 0x0, 0x8000000000004) bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f00000002c0)={0x18, 0xc, &(0x7f0000000340)=ANY=[@ANYBLOB="18000000050000000000000003000000185200000d00000000000000000000003f691800fcffffff186b00000100000000000000050000001835000002000000000000000000000018250000", @ANYRES32=r0, @ANYBLOB="00000000000800009500000000000000c470a46b4120ef6084910ce0c5eeb8eb153fef97de276d54a125e5491d806acc70ed9fa8880db3f96db2205783ede74e56db"], &(0x7f0000000000)='GPL\x00', 0x5, 0xf8, &(0x7f0000000100)=""/248, 0x41000, 0x2, '\x00', 0x0, 0x0, r0, 0x8, &(0x7f0000000200)={0x1, 0x2}, 0x8, 0x10, &(0x7f0000000240)={0x1, 0x3, 0x3ff}, 0x10, 0x0, 0x0, 0x0, &(0x7f0000000280)=[r2]}, 0x80) [ 2175.758626][ T7397] bond1400: (slave bridge1265): making interface the new active one [ 2175.777382][ T7397] bond1400: (slave bridge1265): Enslaving as an active interface with an up link [ 2175.789706][ T7430] netlink: 'syz-executor.3': attribute type 1 has an invalid length. 
[ 2175.858765][ T7454] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further 10:41:52 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) (async) accept(0xffffffffffffffff, 0x0, 0x0) (async) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) (async) r0 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) (async, rerun: 32) ioctl$F2FS_IOC_MOVE_RANGE(r0, 0xc020f509, &(0x7f0000000000)={r0, 0x397aeaf0, 0x0, 0xab2}) (rerun: 32) sendmsg$RDMA_NLDEV_CMD_NEWLINK(r1, &(0x7f00000001c0)={&(0x7f0000000040)={0x10, 0x0, 0x0, 0x4000000}, 0xc, &(0x7f0000000180)={&(0x7f0000000080)={0x88, 0x1403, 0x0, 0x70bd29, 0x25dfdbfc, "", [{{0x9, 0x2, 'syz0\x00'}, {0x8, 0x41, 'siw\x00'}, {0x14, 0x33, 'pim6reg\x00'}}, {{0x9, 0x2, 'syz0\x00'}, {0x8, 0x41, 'rxe\x00'}, {0x14, 0x33, 'bridge_slave_1\x00'}}, {{0x9, 0x2, 'syz2\x00'}, {0x8, 0x41, 'siw\x00'}, {0x14, 0x33, 'veth0_to_team\x00'}}]}, 0x88}, 0x1, 0x0, 0x0, 0x40}, 0x0) (async) sendmsg$nl_route(r0, 0x0, 0x0) [ 2175.936190][ T7430] 8021q: adding VLAN 0 to HW filter on device bond1436 [ 2175.964998][ T7418] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2176.037263][ T7418] 8021q: adding VLAN 0 to HW filter on device bond844 [ 2176.091420][ T7442] bond1436: (slave bridge1335): making interface the new active one [ 2176.103702][ T7442] bond1436: (slave bridge1335): Enslaving as an active interface with an up link [ 2176.147535][ T7438] bond844: (slave bridge1001): making interface the new active one [ 2176.160620][ T7438] bond844: (slave bridge1001): Enslaving as an active interface with an up link 10:41:52 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r1 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r1, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) (async) mmap(&(0x7f0000ffc000/0x1000)=nil, 0x1000, 0x1000003, 0x100010, r1, 0x9357f000) (async) r2 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r3 = openat$cgroup_ro(r2, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r3, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) (async) ioctl$FS_IOC_RESVSP(r3, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r3, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) write$tun(r3, 0x0, 0x0) (async) bpf$ITER_CREATE(0x21, &(0x7f0000000000)={r3}, 0x8) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async, rerun: 64) write$binfmt_script(r0, &(0x7f0000000080)={'#! 
', './file0', [{0x20, 'memory.events\x00'}], 0xa, "314ccee4b7548a95a374ec63044b45b7055e5b79663c59a8315f3558731109acf0910b678c1447eb891c96284cdbdec9a106517dcd5a7c81f0bf10f5268e78b3b58df9017898a79c36d2c02a6ffe295174573345298627c16b0e063891f75e107153966392c03fe79a2a0695f5aae209c740f4327a409600be77b8613a5d6be41e9dc7549a12faadbd949939545f415e350d28c74bb512180fce953dad86096fabfbf7bbd62f7987b90407f0d0f02d2d983713b2c697a7e928354399e9b88bd1abfaf7190b78603192c4f03bcee07145cd0c2dc38b9ce5d1e4a68c911889b45dc427fc41ae16aaca5e14e79570c214fa"}, 0x10a) (async, rerun: 64) r4 = accept4$nfc_llcp(r3, 0x0, &(0x7f00000002c0), 0x80800) r5 = openat$tun(0xffffffffffffff9c, &(0x7f0000000080), 0x2, 0x0) (async) r6 = socket$nl_route(0x10, 0x3, 0x0) (async, rerun: 32) r7 = socket$inet6_sctp(0xa, 0x5, 0x84) (rerun: 32) sendmmsg$inet6(r7, &(0x7f0000005900)=[{{&(0x7f0000000180)={0xa, 0x0, 0x0, @private1}, 0x1c, &(0x7f0000001680)=[{&(0x7f00000001c0)="1a", 0x1}], 0x1}}, {{&(0x7f0000002c80)={0xa, 0x0, 0x0, @ipv4={'\x00', '\xff\xff', @private=0xa010101}}, 0x1c, &(0x7f0000004180)=[{&(0x7f0000002d00)="92", 0x1}], 0x1}}], 0x2, 0x4000040) ioctl$BTRFS_IOC_BALANCE_PROGRESS(0xffffffffffffffff, 0x84009422, &(0x7f0000001740)={0x0, 0x0, {0x0, @usage, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, @struct}, {0x0, @usage, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, @struct}, {0x0, @struct}}) ioctl$BTRFS_IOC_SCRUB_PROGRESS(r7, 0xc400941d, &(0x7f00000007c0)={r8, 0x6, 0x6}) ioctl$BTRFS_IOC_SCRUB(r6, 0xc400941b, &(0x7f0000000940)={r8, 0x3f, 0x1, 0x1}) (async) ioctl$BTRFS_IOC_DEV_INFO(r5, 0xd000941e, &(0x7f00000004c0)={r8, "57149989cf1136de6b93f2f3e5ead599"}) ioctl$BTRFS_IOC_SCRUB_PROGRESS(r2, 0xc400941d, &(0x7f0000000400)={r8, 0x1, 0x6, 0x1}) r9 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r9, 0x0, 0x8000000000004) ioctl$F2FS_IOC_MOVE_RANGE(r4, 0xc020f509, &(0x7f00000003c0)={r9, 0x0, 0x7, 0x2}) 10:41:52 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='cpuacct.usage_sys\x00', 0x275a, 0x0) r1 = syz_init_net_socket$bt_hci(0x1f, 0x3, 0x1) ioctl$BTRFS_IOC_QUOTA_RESCAN_WAIT(r1, 0x942e, 0x0) r2 = accept$inet6(r0, &(0x7f0000000000)={0xa, 0x0, 0x0, @local}, &(0x7f0000000080)=0x1c) ioctl$FS_IOC_SETVERSION(r2, 0x40087602, &(0x7f00000000c0)=0xfffffffffffffff9) r3 = bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000080)={0x6, 0x4, &(0x7f0000000b40)=ANY=[@ANYBLOB="18020000000000000000000000000000850000007b08000095000000000000003a64d725989c4db16f3baf71e8e1e0e0fda53265e4986a8c0c5df4890b46f7846cb7e88700f70c9f5bf25e11ea397e02a20816d06d8cf70fb1ce0f722145d23429d9e7cfb38f06d6534257aa696d61d62cf6a1a78b21361809b6a0395915bdb64fb487cb0fad8a71b0bbd7cd3d179cd7e2adb6b26e449c1d44b4b691a0aa23604699a34918f4f3bdb02390cab447693161cfe5ce5e07eae50ceb073b257290594a250afbbfdd2d0fedeca5db427aaf183a78f82a56aa9868077e2bb77d396224335038f286ca71f5d1b5dc035167eb7c76d84b5a07fea66f1738894a4ea1ccc9e9049ef948b9e97de4f14e721fa1ba5af84ad4"], &(0x7f0000000100)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) bpf$BPF_PROG_TEST_RUN(0xa, &(0x7f0000000000)={r3, 0xf000, 0x0, 0x41, 0x0, 0x0, 0x41, 0x0, 0x0, 0x0, 0x0, 0x0}, 0x48) ioctl$F2FS_IOC_MOVE_RANGE(r3, 0xc020f509, &(0x7f0000000140)={r3}) r4 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r4, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f00000001c0)=0xfffffffffffffbff) 10:41:52 executing program 5: 
mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) writev(r0, &(0x7f0000000340)=[{&(0x7f0000000280)="043caa07f3a3f76d858ace87d769ded7550278376e8c4f6550d9108cbf5189deea33e10b8a7224c6107d53d01f904f58c8e83e60c4192c5a7fbd1a5f9c9fb63ca8dddee8d195355273926702d073752d80d8545e667b5c485a", 0x59}, {&(0x7f0000000100)="e48c29435b087869336f3e74363df6ea882508c8aa487ff564e800ccd724f50e3896c5d551b1a2ec6bb29187299fe2e06c11dfcb2075f9a839b34a4a6b4c2bec", 0x40}, {&(0x7f0000000400)="a318ece45becd6e47ff8375b816640bce6a9ff4d8e83ab791404d351c3ba957f2f3a0220176c17e899248cf03c8a908c4ade8de15d81e79b803df97f6fe5e817c2efde3befe034a7bc6d52a0c1caf88dc7d2077e40879876ce4aa8d6273615939a97526b8cb409d0c58f8075db16b3aed80bd44efb660be17d9db07a801a43b59a034e9998a5d60b072ab0fe5dd97a86e25568ab6d9d74c7af31171f38eaff0cb0fc24300b48212ec6608ef820fc964a28db4cd8d0526d550efac04bc60f4e4cd3c224cbe0ba45f28e0566930afbca59424e39854793c6e301fdc9cedeb00500db4af5f6c2004568aa4682fc656b1943bb63", 0xf2}, {&(0x7f0000000500)="a1a8c7128cf53b860b5aaa1ccee4ffe846420e0192cfed6fed4eeddf537179b0a83627c3868f3b78b60a61263a90957f69337b2ab0a123218ed0318185135c095b602df299adfc458eef4947ab34782ecdbae8436486efca9d9076f2316af18200a3014efba92706cc1748f7c5925bdaa3aa4516b6f1a6f22f4f1c23d2554620b90ad856298d3087e4519d78d14dd2d1dbb2401cfd330b19ac2fc26b2b4837023a585b846cdc0e007773719a6ad0cf3acb3cde81220e1909741522d75d5c49222f1d0ef6d8bd1b58070df04e658553cb734adbdb98c8482330ed25a1d53b5b3a6c09aa436715f3535c84a943", 0xec}], 0x4) accept(0xffffffffffffffff, 0x0, &(0x7f0000000080)) socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x0, 0x803, 0x0) syz_genetlink_get_family_id$mptcp(&(0x7f0000000000), r1) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r3, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r5, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0xba020000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x3c}}, 0x0) 10:41:52 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xfa030000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 
0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2176.276056][ T7459] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 10:41:52 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) (async) accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) (async) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r0 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) ioctl$F2FS_IOC_MOVE_RANGE(r0, 0xc020f509, &(0x7f0000000000)={r0, 0x397aeaf0, 0x0, 0xab2}) sendmsg$RDMA_NLDEV_CMD_NEWLINK(r1, &(0x7f00000001c0)={&(0x7f0000000040)={0x10, 0x0, 0x0, 0x4000000}, 0xc, &(0x7f0000000180)={&(0x7f0000000080)={0x88, 0x1403, 0x0, 0x70bd29, 0x25dfdbfc, "", [{{0x9, 0x2, 'syz0\x00'}, {0x8, 0x41, 'siw\x00'}, {0x14, 0x33, 'pim6reg\x00'}}, {{0x9, 0x2, 'syz0\x00'}, {0x8, 0x41, 'rxe\x00'}, {0x14, 0x33, 'bridge_slave_1\x00'}}, {{0x9, 0x2, 'syz2\x00'}, {0x8, 0x41, 'siw\x00'}, {0x14, 0x33, 'veth0_to_team\x00'}}]}, 0x88}, 0x1, 0x0, 0x0, 0x40}, 0x0) sendmsg$nl_route(r0, 0x0, 0x0) (async) sendmsg$nl_route(r0, 0x0, 0x0) [ 2176.365740][ T7473] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further 10:41:52 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='cpuacct.usage_sys\x00', 0x275a, 0x0) r1 = syz_init_net_socket$bt_hci(0x1f, 0x3, 0x1) ioctl$BTRFS_IOC_QUOTA_RESCAN_WAIT(r1, 0x942e, 0x0) r2 = accept$inet6(r0, &(0x7f0000000000)={0xa, 0x0, 0x0, @local}, &(0x7f0000000080)=0x1c) ioctl$FS_IOC_SETVERSION(r2, 0x40087602, &(0x7f00000000c0)=0xfffffffffffffff9) r3 = bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000080)={0x6, 0x4, &(0x7f0000000b40)=ANY=[@ANYBLOB="18020000000000000000000000000000850000007b08000095000000000000003a64d725989c4db16f3baf71e8e1e0e0fda53265e4986a8c0c5df4890b46f7846cb7e88700f70c9f5bf25e11ea397e02a20816d06d8cf70fb1ce0f722145d23429d9e7cfb38f06d6534257aa696d61d62cf6a1a78b21361809b6a0395915bdb64fb487cb0fad8a71b0bbd7cd3d179cd7e2adb6b26e449c1d44b4b691a0aa23604699a34918f4f3bdb02390cab447693161cfe5ce5e07eae50ceb073b257290594a250afbbfdd2d0fedeca5db427aaf183a78f82a56aa9868077e2bb77d396224335038f286ca71f5d1b5dc035167eb7c76d84b5a07fea66f1738894a4ea1ccc9e9049ef948b9e97de4f14e721fa1ba5af84ad4"], &(0x7f0000000100)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) bpf$BPF_PROG_TEST_RUN(0xa, &(0x7f0000000000)={r3, 0xf000, 0x0, 0x41, 0x0, 0x0, 0x41, 0x0, 0x0, 0x0, 0x0, 0x0}, 0x48) ioctl$F2FS_IOC_MOVE_RANGE(r3, 0xc020f509, &(0x7f0000000140)={r3}) r4 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r4, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f00000001c0)=0xfffffffffffffbff) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='cpuacct.usage_sys\x00', 0x275a, 0x0) (async) syz_init_net_socket$bt_hci(0x1f, 0x3, 0x1) (async) ioctl$BTRFS_IOC_QUOTA_RESCAN_WAIT(r1, 0x942e, 0x0) (async) accept$inet6(r0, &(0x7f0000000000)={0xa, 0x0, 0x0, @local}, &(0x7f0000000080)=0x1c) (async) ioctl$FS_IOC_SETVERSION(r2, 0x40087602, &(0x7f00000000c0)=0xfffffffffffffff9) (async) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000080)={0x6, 0x4, 
&(0x7f0000000b40)=ANY=[@ANYBLOB="18020000000000000000000000000000850000007b08000095000000000000003a64d725989c4db16f3baf71e8e1e0e0fda53265e4986a8c0c5df4890b46f7846cb7e88700f70c9f5bf25e11ea397e02a20816d06d8cf70fb1ce0f722145d23429d9e7cfb38f06d6534257aa696d61d62cf6a1a78b21361809b6a0395915bdb64fb487cb0fad8a71b0bbd7cd3d179cd7e2adb6b26e449c1d44b4b691a0aa23604699a34918f4f3bdb02390cab447693161cfe5ce5e07eae50ceb073b257290594a250afbbfdd2d0fedeca5db427aaf183a78f82a56aa9868077e2bb77d396224335038f286ca71f5d1b5dc035167eb7c76d84b5a07fea66f1738894a4ea1ccc9e9049ef948b9e97de4f14e721fa1ba5af84ad4"], &(0x7f0000000100)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) (async) bpf$BPF_PROG_TEST_RUN(0xa, &(0x7f0000000000)={r3, 0xf000, 0x0, 0x41, 0x0, 0x0, 0x41, 0x0, 0x0, 0x0, 0x0, 0x0}, 0x48) (async) ioctl$F2FS_IOC_MOVE_RANGE(r3, 0xc020f509, &(0x7f0000000140)={r3}) (async) openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) (async) openat$cgroup_ro(r4, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f00000001c0)=0xfffffffffffffbff) (async) 10:41:52 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='cpuacct.usage_sys\x00', 0x275a, 0x0) r1 = syz_init_net_socket$bt_hci(0x1f, 0x3, 0x1) ioctl$BTRFS_IOC_QUOTA_RESCAN_WAIT(r1, 0x942e, 0x0) r2 = accept$inet6(r0, &(0x7f0000000000)={0xa, 0x0, 0x0, @local}, &(0x7f0000000080)=0x1c) ioctl$FS_IOC_SETVERSION(r2, 0x40087602, &(0x7f00000000c0)=0xfffffffffffffff9) r3 = bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000080)={0x6, 0x4, &(0x7f0000000b40)=ANY=[@ANYBLOB="18020000000000000000000000000000850000007b08000095000000000000003a64d725989c4db16f3baf71e8e1e0e0fda53265e4986a8c0c5df4890b46f7846cb7e88700f70c9f5bf25e11ea397e02a20816d06d8cf70fb1ce0f722145d23429d9e7cfb38f06d6534257aa696d61d62cf6a1a78b21361809b6a0395915bdb64fb487cb0fad8a71b0bbd7cd3d179cd7e2adb6b26e449c1d44b4b691a0aa23604699a34918f4f3bdb02390cab447693161cfe5ce5e07eae50ceb073b257290594a250afbbfdd2d0fedeca5db427aaf183a78f82a56aa9868077e2bb77d396224335038f286ca71f5d1b5dc035167eb7c76d84b5a07fea66f1738894a4ea1ccc9e9049ef948b9e97de4f14e721fa1ba5af84ad4"], &(0x7f0000000100)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) bpf$BPF_PROG_TEST_RUN(0xa, &(0x7f0000000000)={r3, 0xf000, 0x0, 0x41, 0x0, 0x0, 0x41, 0x0, 0x0, 0x0, 0x0, 0x0}, 0x48) ioctl$F2FS_IOC_MOVE_RANGE(r3, 0xc020f509, &(0x7f0000000140)={r3}) r4 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r4, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f00000001c0)=0xfffffffffffffbff) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='cpuacct.usage_sys\x00', 0x275a, 0x0) (async) syz_init_net_socket$bt_hci(0x1f, 0x3, 0x1) (async) ioctl$BTRFS_IOC_QUOTA_RESCAN_WAIT(r1, 0x942e, 0x0) (async) accept$inet6(r0, &(0x7f0000000000)={0xa, 0x0, 0x0, @local}, &(0x7f0000000080)=0x1c) (async) ioctl$FS_IOC_SETVERSION(r2, 0x40087602, &(0x7f00000000c0)=0xfffffffffffffff9) (async) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000080)={0x6, 0x4, 
&(0x7f0000000b40)=ANY=[@ANYBLOB="18020000000000000000000000000000850000007b08000095000000000000003a64d725989c4db16f3baf71e8e1e0e0fda53265e4986a8c0c5df4890b46f7846cb7e88700f70c9f5bf25e11ea397e02a20816d06d8cf70fb1ce0f722145d23429d9e7cfb38f06d6534257aa696d61d62cf6a1a78b21361809b6a0395915bdb64fb487cb0fad8a71b0bbd7cd3d179cd7e2adb6b26e449c1d44b4b691a0aa23604699a34918f4f3bdb02390cab447693161cfe5ce5e07eae50ceb073b257290594a250afbbfdd2d0fedeca5db427aaf183a78f82a56aa9868077e2bb77d396224335038f286ca71f5d1b5dc035167eb7c76d84b5a07fea66f1738894a4ea1ccc9e9049ef948b9e97de4f14e721fa1ba5af84ad4"], &(0x7f0000000100)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) (async) bpf$BPF_PROG_TEST_RUN(0xa, &(0x7f0000000000)={r3, 0xf000, 0x0, 0x41, 0x0, 0x0, 0x41, 0x0, 0x0, 0x0, 0x0, 0x0}, 0x48) (async) ioctl$F2FS_IOC_MOVE_RANGE(r3, 0xc020f509, &(0x7f0000000140)={r3}) (async) openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) (async) openat$cgroup_ro(r4, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f00000001c0)=0xfffffffffffffbff) (async) 10:41:52 executing program 2: accept4(0xffffffffffffffff, &(0x7f0000000080)=@pppol2tp={0x18, 0x1, {0x0, 0xffffffffffffffff, {0x2, 0x0, @multicast1}}}, &(0x7f0000000000)=0x80, 0x80800) sendmsg$NL80211_CMD_SET_REG(r0, &(0x7f00000001c0)={&(0x7f0000000100)={0x10, 0x0, 0x0, 0x10000}, 0xc, &(0x7f0000000180)={&(0x7f0000000140)={0x2c, 0x0, 0x10, 0x70bd26, 0x25dfdbfb, {}, [@NL80211_ATTR_WIPHY={0x8, 0x1, 0x2c}, @NL80211_ATTR_REG_ALPHA2={0x6, 0x21, 'b\x00'}, @NL80211_ATTR_SOCKET_OWNER={0x4}, @NL80211_ATTR_SOCKET_OWNER={0x4}]}, 0x2c}, 0x1, 0x0, 0x0, 0x4004040}, 0x8000) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r1, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2176.535684][ T7497] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2176.549696][ T7459] 8021q: adding VLAN 0 to HW filter on device bond1401 10:41:52 executing program 4: ioctl$F2FS_IOC_MOVE_RANGE(0xffffffffffffffff, 0xc020f509, &(0x7f0000000000)={0xffffffffffffffff, 0x0, 0x8, 0xdd8}) r1 = openat$cgroup_ro(r0, &(0x7f0000000080)='hugetlb.1GB.rsvd.usage_in_bytes\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r1, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2176.742172][ T7510] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2176.817847][ T7460] bond1401: (slave bridge1266): making interface the new active one 10:41:52 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xd50b0000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:41:52 executing program 0: 
mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) openat$cgroup_ro(r0, &(0x7f0000000080)='blkio.throttle.io_service_bytes_recursive\x00', 0x0, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r1, 0x0, 0x8000000000004) r2 = socket$caif_stream(0x25, 0x1, 0x3) r3 = accept4(r2, &(0x7f0000000180)=@pptp={0x18, 0x2, {0x0, @initdev}}, &(0x7f0000000000)=0xffe0, 0x1000) sendmsg$nl_route(r3, 0x0, 0x4) 10:41:52 executing program 4: ioctl$F2FS_IOC_MOVE_RANGE(0xffffffffffffffff, 0xc020f509, &(0x7f0000000000)={0xffffffffffffffff, 0x0, 0x8, 0xdd8}) (async) ioctl$F2FS_IOC_MOVE_RANGE(0xffffffffffffffff, 0xc020f509, &(0x7f0000000000)={0xffffffffffffffff, 0x0, 0x8, 0xdd8}) r1 = openat$cgroup_ro(r0, &(0x7f0000000080)='hugetlb.1GB.rsvd.usage_in_bytes\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r1, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 10:41:52 executing program 2: accept4(0xffffffffffffffff, &(0x7f0000000080)=@pppol2tp={0x18, 0x1, {0x0, 0xffffffffffffffff, {0x2, 0x0, @multicast1}}}, &(0x7f0000000000)=0x80, 0x80800) sendmsg$NL80211_CMD_SET_REG(r0, &(0x7f00000001c0)={&(0x7f0000000100)={0x10, 0x0, 0x0, 0x10000}, 0xc, &(0x7f0000000180)={&(0x7f0000000140)={0x2c, 0x0, 0x10, 0x70bd26, 0x25dfdbfb, {}, [@NL80211_ATTR_WIPHY={0x8, 0x1, 0x2c}, @NL80211_ATTR_REG_ALPHA2={0x6, 0x21, 'b\x00'}, @NL80211_ATTR_SOCKET_OWNER={0x4}, @NL80211_ATTR_SOCKET_OWNER={0x4}]}, 0x2c}, 0x1, 0x0, 0x0, 0x4004040}, 0x8000) (async) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r1, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2176.859146][ T7460] bond1401: (slave bridge1266): Enslaving as an active interface with an up link [ 2176.883330][ T7478] netlink: 'syz-executor.3': attribute type 1 has an invalid length. 10:41:53 executing program 2: accept4(0xffffffffffffffff, &(0x7f0000000080)=@pppol2tp={0x18, 0x1, {0x0, 0xffffffffffffffff, {0x2, 0x0, @multicast1}}}, &(0x7f0000000000)=0x80, 0x80800) sendmsg$NL80211_CMD_SET_REG(r0, &(0x7f00000001c0)={&(0x7f0000000100)={0x10, 0x0, 0x0, 0x10000}, 0xc, &(0x7f0000000180)={&(0x7f0000000140)={0x2c, 0x0, 0x10, 0x70bd26, 0x25dfdbfb, {}, [@NL80211_ATTR_WIPHY={0x8, 0x1, 0x2c}, @NL80211_ATTR_REG_ALPHA2={0x6, 0x21, 'b\x00'}, @NL80211_ATTR_SOCKET_OWNER={0x4}, @NL80211_ATTR_SOCKET_OWNER={0x4}]}, 0x2c}, 0x1, 0x0, 0x0, 0x4004040}, 0x8000) (async) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r1, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2177.084768][ T7478] 8021q: adding VLAN 0 to HW filter on device bond1437 [ 2177.223265][ T7486] bond1437: (slave bridge1336): making interface the new active one [ 2177.248622][ T7486] bond1437: (slave bridge1336): Enslaving as an active interface with an up link [ 2177.282532][ T7500] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
10:41:53 executing program 5: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) writev(r0, &(0x7f0000000340)=[{&(0x7f0000000280)="043caa07f3a3f76d858ace87d769ded7550278376e8c4f6550d9108cbf5189deea33e10b8a7224c6107d53d01f904f58c8e83e60c4192c5a7fbd1a5f9c9fb63ca8dddee8d195355273926702d073752d80d8545e667b5c485a", 0x59}, {&(0x7f0000000100)="e48c29435b087869336f3e74363df6ea882508c8aa487ff564e800ccd724f50e3896c5d551b1a2ec6bb29187299fe2e06c11dfcb2075f9a839b34a4a6b4c2bec", 0x40}, {&(0x7f0000000400)="a318ece45becd6e47ff8375b816640bce6a9ff4d8e83ab791404d351c3ba957f2f3a0220176c17e899248cf03c8a908c4ade8de15d81e79b803df97f6fe5e817c2efde3befe034a7bc6d52a0c1caf88dc7d2077e40879876ce4aa8d6273615939a97526b8cb409d0c58f8075db16b3aed80bd44efb660be17d9db07a801a43b59a034e9998a5d60b072ab0fe5dd97a86e25568ab6d9d74c7af31171f38eaff0cb0fc24300b48212ec6608ef820fc964a28db4cd8d0526d550efac04bc60f4e4cd3c224cbe0ba45f28e0566930afbca59424e39854793c6e301fdc9cedeb00500db4af5f6c2004568aa4682fc656b1943bb63", 0xf2}, {&(0x7f0000000500)="a1a8c7128cf53b860b5aaa1ccee4ffe846420e0192cfed6fed4eeddf537179b0a83627c3868f3b78b60a61263a90957f69337b2ab0a123218ed0318185135c095b602df299adfc458eef4947ab34782ecdbae8436486efca9d9076f2316af18200a3014efba92706cc1748f7c5925bdaa3aa4516b6f1a6f22f4f1c23d2554620b90ad856298d3087e4519d78d14dd2d1dbb2401cfd330b19ac2fc26b2b4837023a585b846cdc0e007773719a6ad0cf3acb3cde81220e1909741522d75d5c49222f1d0ef6d8bd1b58070df04e658553cb734adbdb98c8482330ed25a1d53b5b3a6c09aa436715f3535c84a943", 0xec}], 0x4) accept(0xffffffffffffffff, 0x0, &(0x7f0000000080)) socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x0, 0x803, 0x0) syz_genetlink_get_family_id$mptcp(&(0x7f0000000000), r1) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r3, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r5, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0xc4010000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x3c}}, 0x0) 10:41:53 executing program 4: ioctl$F2FS_IOC_MOVE_RANGE(0xffffffffffffffff, 0xc020f509, &(0x7f0000000000)={0xffffffffffffffff, 0x0, 0x8, 0xdd8}) r1 = openat$cgroup_ro(r0, &(0x7f0000000080)='hugetlb.1GB.rsvd.usage_in_bytes\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r1, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 10:41:53 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) (async) r0 = openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) openat$cgroup_ro(r0, &(0x7f0000000080)='blkio.throttle.io_service_bytes_recursive\x00', 0x0, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) 
socket$netlink(0x10, 0x3, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) (async) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r1, 0x0, 0x8000000000004) (async) r2 = socket$caif_stream(0x25, 0x1, 0x3) r3 = accept4(r2, &(0x7f0000000180)=@pptp={0x18, 0x2, {0x0, @initdev}}, &(0x7f0000000000)=0xffe0, 0x1000) sendmsg$nl_route(r3, 0x0, 0x4) 10:41:53 executing program 2: r0 = socket$inet6_sctp(0xa, 0x5, 0x84) getsockopt$inet_sctp6_SCTP_SOCKOPT_CONNECTX3(r0, 0x84, 0x6f, &(0x7f0000000000)={0x0, 0x1c, &(0x7f0000000080)=[@in6={0xa, 0x0, 0x0, @rand_addr=' \x01\x00'}]}, &(0x7f0000000180)=0x10) r1 = socket$inet_sctp(0x2, 0x1, 0x84) getsockopt$inet_sctp_SCTP_MAX_BURST(r1, 0x84, 0xd, &(0x7f0000000000)=@assoc_value={0x0}, &(0x7f0000000240)=0x8) getsockopt$inet_sctp6_SCTP_PEER_ADDR_PARAMS(r0, 0x84, 0x9, &(0x7f00000000c0)={r2, @in6={{0xa, 0x4e24, 0x9, @private0, 0x2}}, 0x0, 0xad34, 0x0, 0x0, 0x0, 0x0, 0x1f}, &(0x7f00000001c0)=0x9c) setsockopt$inet_sctp6_SCTP_PEER_ADDR_PARAMS(0xffffffffffffffff, 0x84, 0x9, &(0x7f0000000080)={r2, @in6={{0xa, 0x4e21, 0x1, @loopback, 0xc000000}}, 0xff, 0x4, 0x3, 0x2008, 0x5, 0x80000}, 0x9c) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r4 = bpf$BPF_BTF_LOAD(0x12, &(0x7f0000000200)={&(0x7f0000000280)={{0xeb9f, 0x1, 0x0, 0x18, 0x0, 0x9f, 0x9f, 0x8, [@array={0x0, 0x0, 0x0, 0x3, 0x0, {0x4, 0x3, 0x3}}, @enum={0x7, 0x1, 0x0, 0x6, 0x4, [{0xb, 0x800}]}, @func_proto={0x0, 0x2, 0x0, 0xd, 0x0, [{0xf}, {0xd}]}, @volatile={0xe, 0x0, 0x0, 0x9, 0x3}, @datasec={0xc, 0x1, 0x0, 0xf, 0x3, [{0x5, 0x8, 0xc2e}], "7bf642"}, @struct={0xf, 0x2, 0x0, 0x4, 0x1, 0x6, [{0x1, 0x4, 0xffffffff}, {0x9, 0x2, 0xfd}]}, @typedef={0x7, 0x0, 0x0, 0x8, 0x1}]}, {0x0, [0x30, 0x71, 0x5f, 0x2e, 0x5f, 0x2e]}}, &(0x7f0000000340)=""/196, 0xc0, 0xc4}, 0x20) ioctl$FS_IOC_GETVERSION(r1, 0x80087601, &(0x7f0000000480)) ioctl$FS_IOC_RESVSP(r4, 0x40305828, &(0x7f0000000440)={0x0, 0x3, 0x8, 0x3}) ioctl$EXT4_IOC_GROUP_EXTEND(r3, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 10:41:53 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xfe030000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2177.306936][ T7500] workqueue: Failed to create a rescuer kthread for wq "bond845": -EINTR [ 2177.630339][ T7525] 8021q: adding VLAN 0 to HW filter on device bond1402 10:41:53 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) pipe(&(0x7f00000001c0)={0xffffffffffffffff}) close(r1) bpf$BPF_LINK_CREATE(0x1c, &(0x7f0000000000)={r0, r1, 0x1e}, 0x10) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 
pipe(&(0x7f0000000140)={0xffffffffffffffff}) close(r2) ioctl$FS_IOC_SETFSLABEL(r2, 0x41009432, &(0x7f00000000c0)="3c980c66d8a95c96bc8bb505b90034b3c7ae3f64c91342831cba04ff39d809620fa7413591bcfb26c0048fa0ef888107ca144527f2928b0bd26d003cedb1e2c94ee8811280b2ba464ad23d5e525d10b95a21e80e42dd5bf93ec3e9691663860c1d6d7d8a804a49af730b537c710ac5a7a4d97b050eaf515f4375d98e97d1bdedded754f513e2dd8a2b2de2105fb325e94143aa010f90548ae874cf8a5e2baaf44d8b00b2c0c2a51d37d84f9586a9173c7e4051e84a6d03472268ce86965a567077b83b4c630d87466b9ef9de7cc694735104a2e46710a84def90aa2ac18e32aa857afbd99a89b577c6bae2215e1f660ac013f681faba99cd4d2b66658f4eec52") 10:41:53 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) openat$cgroup_ro(r0, &(0x7f0000000080)='blkio.throttle.io_service_bytes_recursive\x00', 0x0, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) (async) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) (async) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) (async, rerun: 32) socket$netlink(0x10, 0x3, 0x0) (rerun: 32) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r1, 0x0, 0x8000000000004) (async) r2 = socket$caif_stream(0x25, 0x1, 0x3) r3 = accept4(r2, &(0x7f0000000180)=@pptp={0x18, 0x2, {0x0, @initdev}}, &(0x7f0000000000)=0xffe0, 0x1000) sendmsg$nl_route(r3, 0x0, 0x4) [ 2177.841947][ T7531] bond1402: (slave bridge1267): making interface the new active one [ 2177.895768][ T7531] bond1402: (slave bridge1267): Enslaving as an active interface with an up link 10:41:54 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xd6030000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:41:54 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) pipe(&(0x7f00000001c0)={0xffffffffffffffff}) close(r1) bpf$BPF_LINK_CREATE(0x1c, &(0x7f0000000000)={r0, r1, 0x1e}, 0x10) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) pipe(&(0x7f0000000140)={0xffffffffffffffff}) close(r2) (async) ioctl$FS_IOC_SETFSLABEL(r2, 0x41009432, 
&(0x7f00000000c0)="3c980c66d8a95c96bc8bb505b90034b3c7ae3f64c91342831cba04ff39d809620fa7413591bcfb26c0048fa0ef888107ca144527f2928b0bd26d003cedb1e2c94ee8811280b2ba464ad23d5e525d10b95a21e80e42dd5bf93ec3e9691663860c1d6d7d8a804a49af730b537c710ac5a7a4d97b050eaf515f4375d98e97d1bdedded754f513e2dd8a2b2de2105fb325e94143aa010f90548ae874cf8a5e2baaf44d8b00b2c0c2a51d37d84f9586a9173c7e4051e84a6d03472268ce86965a567077b83b4c630d87466b9ef9de7cc694735104a2e46710a84def90aa2ac18e32aa857afbd99a89b577c6bae2215e1f660ac013f681faba99cd4d2b66658f4eec52") 10:41:54 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) r1 = accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r2 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r2, 0x0, 0x0) r3 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r4 = openat$cgroup_ro(r3, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r4, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) ioctl$FS_IOC_RESVSP(r4, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r4, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) write$tun(r4, 0x0, 0x0) r5 = socket$nl_route(0x10, 0x3, 0x0) r6 = socket(0x1, 0x803, 0x0) getsockname$packet(r6, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r5, &(0x7f0000000280)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000180)=@newlink={0x3c, 0x10, 0x403, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @vlan={{0x9}, {0x4}}}, @IFLA_MASTER={0x8, 0x4, r7}]}, 0x3c}}, 0x0) ioctl$ifreq_SIOCGIFINDEX_batadv_hard(r0, 0x8933, &(0x7f0000000040)={'batadv_slave_0\x00', 0x0}) r9 = socket$nl_route(0x10, 0x3, 0x0) r10 = socket(0x1, 0x803, 0x0) getsockname$packet(r10, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r9, &(0x7f0000000280)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000180)=@newlink={0x3c, 0x10, 0x403, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @vlan={{0x9}, {0x4}}}, @IFLA_MASTER={0x8, 0x4, r11}]}, 0x3c}}, 0x0) ioctl$sock_ipv4_tunnel_SIOCADDTUNNEL(r1, 0x89f1, &(0x7f0000000080)={'syztnl0\x00', &(0x7f0000000180)={'erspan0\x00', 0x0, 0x8000, 0xc0, 0x4bffa6b6, 0xc33, {{0x30, 0x4, 0x2, 0x10, 0xc0, 0x64, 0x0, 0x0, 0x4, 0x0, @local, @multicast1, {[@lsrr={0x83, 0x27, 0x88, [@loopback, @remote, @dev={0xac, 0x14, 0x14, 0x1f}, @dev={0xac, 0x14, 0x14, 0x39}, @loopback, @multicast2, @remote, @private=0xa010100, @broadcast]}, @cipso={0x86, 0x31, 0xa68f1957ca170c49, [{0x7, 0x5, "2b68b5"}, {0x0, 0xc, "bd6a909a9899389fcb78"}, {0x1, 0x2}, {0x7, 0xc, "317b8617345c148f4008"}, {0x6, 0xc, "e48931abd84a84bf40fc"}]}, @ra={0x94, 0x4, 
0x1}, @rr={0x7, 0xb, 0xf9, [@private=0xa010100, @multicast1]}, @timestamp_addr={0x44, 0x44, 0xc1, 0x1, 0xa, [{@rand_addr=0x64010100, 0x9}, {@initdev={0xac, 0x1e, 0x1, 0x0}, 0x1000}, {@multicast1, 0x8001}, {@rand_addr=0x64010101, 0x40}, {@local, 0x2000}, {@initdev={0xac, 0x1e, 0x0, 0x0}, 0x1}, {@initdev={0xac, 0x1e, 0x0, 0x0}, 0xfffffff8}, {@private=0xa010100, 0x5}]}]}}}}}) sendmsg$nl_route_sched(r4, &(0x7f0000000100)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f00000000c0)={&(0x7f0000000400)=@newtaction={0x6dac, 0x30, 0x200, 0x70bd2c, 0x25dfdbfb, {}, [{0x4d4, 0x1, [@m_mpls={0x14c, 0x17, 0x0, 0x0, {{0x9}, {0x60, 0x2, 0x0, 0x1, [@TCA_MPLS_TC={0x5, 0x6, 0x7}, @TCA_MPLS_LABEL={0x8, 0x5, 0xe1cc8}, @TCA_MPLS_LABEL={0x8, 0x5, 0xbb4b3}, @TCA_MPLS_PARMS={0x1c, 0x2, {{0x401, 0x7fffffff, 0x7, 0x3, 0x6}, 0x2}}, @TCA_MPLS_LABEL={0x8, 0x5, 0x8ff28}, @TCA_MPLS_PROTO={0x6, 0x4, 0x88f5}, @TCA_MPLS_LABEL={0x8, 0x5, 0xa9f14}, @TCA_MPLS_PROTO={0x6, 0x4, 0x9000}, @TCA_MPLS_TC={0x5, 0x6, 0x6}]}, {0xc1, 0x6, "8ba73108021a0532b17f53d4d7d5a4a5b7f12647d76baa978c4eb1b9ddc7f67e6bf7989c39079c02983698608144fafb09425343ca58f160fd1a06456cfbe30512db8afb3804f0194840271a965ce77c7dbd9590b46202e83d6c119bd091833989e4c04635f80869f68544fd33310f22542e6b10a391f443b32037d18aad683c90d84cc1e300f778c18e202534caa59ba39b30b514d1e5d9bc356c1c3f42acf56c09986096b019504fefb866326a18ca4020383a28736f7f7eb7441c21"}, {0xc, 0x7, {0x1, 0x1}}, {0xc, 0x8, {0x0, 0x1}}}}, @m_xt={0xc0, 0x15, 0x0, 0x0, {{0x7}, {0x14, 0x2, 0x0, 0x1, [@TCA_IPT_INDEX={0x8, 0x3, 0x5}, @TCA_IPT_INDEX={0x8, 0x3, 0x3}]}, {0x86, 0x6, "6fff40c44e65569c9d45c0be51936f5ccfe37efcd474a9d04aa434d88165ff17625bb184058fc9d66ae35373254d6a83cb8d18244c613b9dea81bda7d79abc2fe88eb0a61bb9ab1c62637bc7ffc5b37b9e0d89aa0f920c1943e94594aee3f5641245060900abb6f384cc585afdb7f02557b4bf010b22cc80bd82f065df522e44075e"}, {0xc, 0x7, {0x0, 0x1}}, {0xc, 0x8, {0x1, 0x3}}}}, @m_connmark={0xfc, 0x9, 0x0, 0x0, {{0xd}, {0xac, 0x2, 0x0, 0x1, [@TCA_CONNMARK_PARMS={0x1c, 0x1, {{0x4, 0x80000001, 0x5, 0x8, 0x6}, 0xa89}}, @TCA_CONNMARK_PARMS={0x1c, 0x1, {{0x8, 0x963e, 0x5, 0x703f, 0x9}, 0x8}}, @TCA_CONNMARK_PARMS={0x1c, 0x1, {{0x25f0, 0x400, 0x2, 0xfffffffc, 0x8}, 0x9}}, @TCA_CONNMARK_PARMS={0x1c, 0x1, {{0x3ff, 0x9, 0x8, 0x7, 0x6}, 0x4}}, @TCA_CONNMARK_PARMS={0x1c, 0x1, {{0x6, 0x29, 0x6, 0x6, 0x2}, 0xca5e}}, @TCA_CONNMARK_PARMS={0x1c, 0x1, {{0x5, 0x7f, 0xffffffffffffffff, 0x1, 0x81}, 0x2}}]}, {0x24, 0x6, "c387dbe2c6dd6d34897459526e144ec5ac3b8c9b8218f27fea4b843326c185f9"}, {0xc}, {0xc, 0x8, {0x1, 0x2}}}}, @m_mpls={0xb8, 0x1a, 0x0, 0x0, {{0x9}, {0x24, 0x2, 0x0, 0x1, [@TCA_MPLS_TTL={0x5, 0x7, 0x40}, @TCA_MPLS_PROTO={0x6, 0x4, 0x88f8}, @TCA_MPLS_TTL={0x5, 0x7, 0x46}, @TCA_MPLS_TTL={0x5, 0x7, 0x8e}]}, {0x6c, 0x6, "3bc9286a2fe4c2e2c932b2e656d8583eec41af4595023cc4d6b4de02be435be75708fab2f04cce87a1467245514e2ffb90316bb58d2657ebfb644e7706519a9f76832274acdd474324a355883defa9275b65692ea3683eba43fe21e523ce14690d14eace71b7a2fa"}, {0xc, 0x7, {0x1}}, {0xc, 0x8, {0x2, 0x2}}}}, @m_ctinfo={0x110, 0x8, 0x0, 0x0, {{0xb}, {0x4c, 0x2, 0x0, 0x1, [@TCA_CTINFO_PARMS_DSCP_MASK={0x8, 0x5, 0x86c}, @TCA_CTINFO_PARMS_DSCP_MASK={0x8, 0x5, 0xd09}, @TCA_CTINFO_ACT={0x18, 0x3, {0x3968, 0x3f, 0x0, 0xe1ae, 0x6}}, @TCA_CTINFO_PARMS_DSCP_MASK={0x8, 0x5, 0x9e}, @TCA_CTINFO_ACT={0x18, 0x3, {0x3, 0x3, 0x5, 0xfffffffe, 0x2408eaf6}}]}, {0x99, 0x6, 
"b5019a429f1cc5a17c713a62d95586ce546dabd8bc0cc16edfab0f71357cdb428381534a495848655ed999837fbe8d129f7f5e9e5014bf54d0136fcd5f3809260b465f1fb0b584c62289695fe7faf1d51fabbff0fccc785b7e49a04ea9d450fce2c64cf7175dfbb43de8da60f33255c9a950a36c7393df50a22f64da4072c38184630f75ab1f1a3ac4c866a96054f115cd5a04dfd1"}, {0xc}, {0xc, 0x8, {0x2, 0x3}}}}]}, {0x60c, 0x1, [@m_connmark={0x158, 0x18, 0x0, 0x0, {{0xd}, {0x3c, 0x2, 0x0, 0x1, [@TCA_CONNMARK_PARMS={0x1c, 0x1, {{0x3f, 0x1, 0x10000000, 0x20, 0xed3a}, 0x1}}, @TCA_CONNMARK_PARMS={0x1c, 0x1, {{0x7, 0x2, 0x20000000, 0x9, 0x4971}, 0x1}}]}, {0xed, 0x6, "ef1e8908a063b1a4702405531a175033a476526dfa656a00d505677b10ee9ff65f780e80bc862661ffceeffd5a10e8d32a1fe08e5b24b0fb87a4812d8ee82375928c1a9ac08af2c6dd1e378e3d21b24b555cc06166f2e8a1a92b8fa2be12b5cec402d27706362816ddafb2ee918c507c788baf60d8c3ce1480db393bd8e8d7562ad84551395f6a163b3a74be6648ba27b378274440f2173dd1d70ca8b7404ccc9dbe74ae4c115ac0996b02ca3cb472666b41a88d3bb5d4cd87e52e9accba0a28d37fb7a1ea027007a7a3083249bdaf7175ef8dac9fb255ee6341a89271a703fb537a45d658df889df9"}, {0xc, 0x7, {0x1}}, {0xc, 0x8, {0x1, 0x2}}}}, @m_vlan={0x60, 0x12, 0x0, 0x0, {{0x9}, {0x30, 0x2, 0x0, 0x1, [@TCA_VLAN_PUSH_VLAN_ID={0x6, 0x3, 0x88e}, @TCA_VLAN_PUSH_VLAN_PROTOCOL={0x6, 0x4, 0x88a8}, @TCA_VLAN_PARMS={0x1c, 0x2, {{0x0, 0x200, 0x0, 0x2, 0x4}, 0x3}}]}, {0x7, 0x6, "80c4b0"}, {0xc, 0x7, {0x1}}, {0xc, 0x8, {0x3, 0x3}}}}, @m_csum={0xe8, 0x1c, 0x0, 0x0, {{0x9}, {0x20, 0x2, 0x0, 0x1, [@TCA_CSUM_PARMS={0x1c, 0x1, {{0x43, 0x2, 0x3, 0x4, 0x4ed}, 0x7c}}]}, {0x9e, 0x6, "8d2d1a4fc178bdd9af7215770320ee5d189796b9523885d646b9faad72171995ba0cb9b53f04e5fed40ddf5887ad50a044d785a2b6e11736e8f506ee3925b6cd964a3c754d5e75751422e595c3a681e54e8dbd9c573a21a9124392cff8b41422d1cceefa07091cda24e9e334d5b83cdeecfbd53be23d85a20c1e27855b9b1bdccb118e59bdb68e52b174d58f58c1014ef0a59d0a280533d7c5c2"}, {0xc, 0x7, {0x1, 0x1}}, {0xc, 0x8, {0x1, 0x3}}}}, @m_ct={0x120, 0x13, 0x0, 0x0, {{0x7}, {0x64, 0x2, 0x0, 0x1, [@TCA_CT_PARMS={0x18, 0x1, {0x7, 0x0, 0x3, 0x9, 0x1}}, @TCA_CT_MARK_MASK={0x8, 0x6, 0x9}, @TCA_CT_NAT_IPV6_MIN={0x14, 0xb, @remote}, @TCA_CT_NAT_IPV4_MIN={0x8, 0x9, @multicast1}, @TCA_CT_LABELS={0x14, 0x7, "e817e941fccd601a0241835ba7bf6acf"}, @TCA_CT_ACTION={0x6, 0x3, 0x1a}, @TCA_CT_NAT_PORT_MIN={0x6, 0xd, 0x4e24}]}, {0x97, 0x6, "c1220e078d92ad552f095b77da0c43c010315f85d69609130bd223c1257ca62f2ea94b2203ff230898e2afe5c7175f57dde2aeee8bd716cd86c1bec5275aeadb78c16e27aaaa47b0d1ca3142be7316e89d44ae4d73148aad37fb64ab91fd7289c874d6bcbfb874bace16f13e8f04cedff00022370917f89069a874267853f3a83697bd7fdce8c59761c1d90ef95d4f4d246db3"}, {0xc, 0x7, {0x1}}, {0xc, 0x8, {0x1, 0x3}}}}, @m_bpf={0x11c, 0x11, 0x0, 0x0, {{0x8}, {0x10, 0x2, 0x0, 0x1, [@TCA_ACT_BPF_NAME={0xc, 0x6, './file0\x00'}]}, {0xe5, 0x6, "6b9dab9f6fd939cc8613e252c3955e11b7698e4be3ce6e84222a2d535aea2af1288c77e2f1c4d06fa5cc837a86730cd7b37f67ed44c6596d420101516176be79b9489b878142e831fdc105abbf8da0255781d0e58195d79d80c71d8bc1e3b8bc515b8d6e2968e075897670729dbba840171bb38595cca57fe1982b190646ad4c6129c808b32748adac3b9f6bdaf889e962e822ac10869126c0190aaec2637a5b0fafdc782835d05ffc83f312d2ef25031e02ad44cd5e09ad29ace41583b95517ca021d03cdf7367eb9eb680038c4664bd407dfaf73c919ced26f722c160b2c6254"}, {0xc, 0x7, {0x0, 0x1}}, {0xc, 0x8, {0x1, 0x2}}}}, @m_ct={0x12c, 0x1f, 0x0, 0x0, {{0x7}, {0x20, 0x2, 0x0, 0x1, [@TCA_CT_NAT_IPV6_MIN={0x14, 0xb, @private2={0xfc, 0x2, '\x00', 0x1}}, @TCA_CT_ZONE={0x6, 0x4, 0x15}]}, {0xe5, 0x6, 
"3a6787e64b422053ee5a680a0634a4b4209e0f088a82c84842852c43fd1fc291c1bb3da34346ec12fa321aceed055f6f89391bcc05c4663003f2339afa7966cbccef2f42731d34f0efa4d7c7c5e03fa0b9203065eabdc9449b13a2ba3a2194d85aa1903324808e338b6ebd6dfff0b08e93fc6ebb456e36033756efc255d0c6629d64a8918c5ef04c2fb6da65a2015dc73cd1a5c46b09f4b8f488a26d523d2d639cc89bcd4e458c6b4c67572f164eabd9f1f80b03435a04c78b579be348e9d5c3a11d234bb763e0065b39e72859d95e36515c1db55ef47c1bed4d051ceefdc2ea79"}, {0xc, 0x7, {0x1}}, {0xc, 0x8, {0x1, 0x3}}}}]}, {0x1b1c, 0x1, [@m_nat={0x15c, 0x20, 0x0, 0x0, {{0x8}, {0x54, 0x2, 0x0, 0x1, [@TCA_NAT_PARMS={0x28, 0x1, {{0x8, 0x6, 0x2, 0x100, 0x10001}, @broadcast, @multicast1, 0xff000000, 0x1}}, @TCA_NAT_PARMS={0x28, 0x1, {{0x1, 0x0, 0x7, 0x8, 0x233}, @broadcast, @remote, 0xffffff00}}]}, {0xe3, 0x6, "0e91a33d18b46768bf314c74682c8c90d6e614cd533cdb45d483fe747a83398983b4f9f116baf43e65d6a6274d035ea99ede57842a5937ae74998cb4bb7498eb79c5c691a480758d3a178707c20a0ed7943bd91ab813caeb2b87a46149a6ac6331d350f99c74ec334003de2dfafa49fe6254530a6282bcfd2a4499321b9fa752dab47b79eaccede9200302236238f2df99d801a91367aae0c23691970e7eeefa52e947b154df370c59d901c3ba7ce382ddfaa5c2e2cc56864f528a6dc79110edc484a210f4e8fd6c1fafd2ecd91fbcd022aa2eeae993fc9132bddb8b5405fa"}, {0xc, 0x7, {0x1}}, {0xc, 0x8, {0x2, 0x1}}}}, @m_police={0x1274, 0xc, 0x0, 0x0, {{0xb}, {0x117c, 0x2, 0x0, 0x1, [[@TCA_POLICE_RESULT={0x8, 0x5, 0x6}, @TCA_POLICE_PEAKRATE={0x404, 0x3, [0x2, 0x8, 0x2, 0x5, 0x20, 0x6, 0x2, 0x7da, 0x2, 0x3, 0x1ff, 0x800, 0x0, 0x800, 0x7fffffff, 0x4, 0x101, 0x40, 0xff, 0xe5, 0x0, 0xa2c0, 0x6, 0x100, 0x7f, 0xfa, 0x1, 0x5, 0x1, 0x5, 0xd0e, 0xcc, 0x80000000, 0xaf, 0x9, 0x6, 0xffffff7a, 0x0, 0x473d, 0xe79, 0xc2bf, 0x9, 0x4, 0x4, 0x6c03d9d6, 0x81, 0x6, 0x7fffffff, 0x6, 0x1, 0xa2d, 0x8625, 0x7fff, 0xffffff01, 0x80000000, 0x0, 0x3, 0x1, 0x0, 0x9, 0x0, 0x2, 0x1000, 0x2, 0x6204, 0x5, 0x8, 0x8, 0x0, 0x4, 0xfffffffc, 0x800, 0x502, 0x766, 0x1ff, 0x80000000, 0x7, 0x5, 0xff, 0x6, 0x7ff, 0x8000, 0x7, 0x7, 0x8, 0x20, 0x8001, 0x9, 0xfffd, 0xca5c, 0x6, 0x2, 0x3, 0x2, 0x3, 0x6, 0x9, 0xfffffff7, 0xffff, 0x7, 0x7ff, 0x80, 0x1ff, 0x7, 0xd62d, 0xf00, 0xffff, 0x0, 0x101, 0x0, 0x1, 0x400, 0x6, 0x7fff, 0x3c, 0xd0f, 0xfffffff9, 0xee, 0x2, 0x1, 0xffffffff, 0x6, 0x3, 0x2, 0xffffffff, 0x72f3, 0x236, 0x0, 0x9, 0x3, 0x7fffffff, 0x20, 0x4, 0x1, 0x9, 0x9, 0x7, 0x3f, 0x3, 0x9, 0x7b, 0x83c, 0x4, 0x1, 0x7f, 0x400, 0x0, 0x1, 0x1, 0x100, 0x8, 0x8001, 0x6c2a, 0x7fffffff, 0x10000, 0x401, 0x0, 0x3, 0x1ff, 0x3, 0x4, 0x200, 0x5, 0x3, 0xefb, 0x1, 0x1, 0x7, 0xff, 0xe7b3, 0x148, 0x2, 0x401, 0x7, 0x7, 0x9, 0xfffffcaa, 0x3, 0x401, 0x6, 0x8001, 0x9, 0xd724, 0x5, 0x426, 0x3, 0x2, 0x8, 0x64, 0xfff, 0x3ffc, 0x0, 0x0, 0xe1, 0x9, 0x10000, 0x6, 0xfe000000, 0xae31, 0x41, 0xffff, 0x10000, 0x3f, 0x80000000, 0x200, 0x9, 0x0, 0x3, 0xffffffff, 0x101, 0x5, 0x8000, 0xff, 0x3f, 0x8, 0x7fff, 0x7fff, 0x8, 0x0, 0x2, 0x4, 0x4, 0x800, 0x169b, 0x0, 0x3, 0x2, 0x1f, 0x9, 0x9, 0x6, 0x4, 0x3e000000, 0x3f, 0x6, 0x8, 0xf195, 0x9, 0x101, 0x8, 0x24000, 0x1, 0x1, 0x2, 0x8, 0x6, 0x2, 0xffffff7f, 0x5c34, 0xffffffff, 0x5e22339, 0x800, 0x4, 0x7]}, @TCA_POLICE_AVRATE={0x8, 0x4, 0x17}, @TCA_POLICE_RATE64={0xc, 0x8, 0x4}, @TCA_POLICE_RATE64={0xc, 0x8, 0x4}, @TCA_POLICE_RATE={0x404, 0x2, [0x2000000, 0x6, 0x3, 0x49c2, 0x2, 0x7, 0x8001, 0xfffffffb, 0x60f, 0x2ee, 0x7, 0x4, 0xffffffff, 0xc14, 0x0, 0x8, 0xff, 0x3, 0x73a4, 0x200040, 0x8000, 0x6, 0xcc, 0x87e, 0x6, 0x7, 0x2, 0x7f, 0xffff, 0x7, 0x7, 0x140000, 0x8001, 0x8001, 0x5c5, 0x3f, 0x9, 0xda37, 0x5, 0x101, 0x5, 0x9d, 0x4, 0x64, 0x9, 0xc4b, 0x1, 0x8000, 0x100, 0x9, 0x7fffffff, 0x5156, 
0x5, 0x8001, 0x80000000, 0x1, 0x7, 0x7, 0x1ff, 0xffff, 0x3, 0x5, 0xdc16, 0xfffffff8, 0x51, 0x800, 0x83, 0xffff2fdc, 0x7fffffff, 0x10001, 0x7f, 0x100, 0x1, 0x8, 0x6, 0xc0, 0xff, 0x7fffffff, 0xffff0000, 0x0, 0x9, 0x0, 0x0, 0xffffffff, 0x1, 0xfe0, 0x71, 0x10001, 0x0, 0xf27, 0x8, 0x10001, 0x8000, 0x7, 0x80000000, 0xb5, 0x7fffffff, 0x913, 0x0, 0x0, 0x8f7, 0x7, 0x7, 0x88, 0x1, 0xffffffff, 0x0, 0xfffffffe, 0x3, 0x6, 0x7, 0x0, 0x3, 0x1, 0x2, 0x0, 0x4, 0xbd06, 0x0, 0x60, 0x2, 0x8aec, 0xffff8000, 0x1ff, 0x1000, 0x8, 0x6, 0x8, 0x20, 0x8, 0x56, 0xa2a, 0x2, 0xad1b, 0x82f4, 0x6, 0x24f, 0xffff, 0x0, 0x80, 0x4, 0x7ff, 0x5, 0xfffff001, 0x4, 0x5, 0x9, 0x5, 0x2, 0x400, 0x6, 0x4, 0x401, 0xfffffff9, 0x1, 0x3, 0x3f, 0x1f, 0x3, 0x2, 0x100, 0x8, 0x40, 0x0, 0x8, 0x3, 0x20, 0x897, 0x5ad, 0x5c2d, 0x6, 0x101, 0x4, 0x10001, 0x571, 0x400, 0x7, 0xfffffff8, 0x1, 0x5, 0x1, 0x95a, 0x10001, 0x8, 0x9, 0x1, 0x1a05, 0xbe, 0x10000, 0x6, 0x4f, 0x7, 0x3ff, 0x3f, 0x8, 0x2, 0x2, 0x389, 0x114c, 0x9, 0x8, 0x0, 0x20, 0x1, 0x4, 0x3, 0x0, 0x0, 0x0, 0x80, 0x2, 0x2, 0xfffffffe, 0x1f, 0x1, 0x1, 0x2, 0x6, 0x2e72, 0x5ca, 0x8001, 0x4, 0x9, 0x100, 0x80000001, 0x7f, 0x5, 0x7, 0x3, 0x6, 0x1, 0x6, 0x0, 0x9, 0x0, 0x8, 0x0, 0x0, 0x5, 0x6, 0x7fff, 0x1, 0x80000000, 0xb45, 0x7, 0xd2, 0xa2, 0x6, 0x0, 0xffff, 0x5, 0x46e, 0x7fffffff, 0x5, 0x4]}, @TCA_POLICE_TBF={0x3c, 0x1, {0x3, 0x20000000, 0x101, 0x800, 0x100, {0x0, 0x2, 0x9, 0xffff, 0x9, 0x9}, {0x0, 0x1, 0x2, 0x4, 0x5, 0x8001}, 0x1, 0xfffeffff, 0x4}}], [@TCA_POLICE_RESULT={0x8, 0x5, 0x6f}, @TCA_POLICE_TBF={0x3c, 0x1, {0x3, 0x0, 0x81, 0x4, 0x9, {0xff, 0x1, 0x1997, 0xffff, 0x2635, 0x40}, {0x3, 0x2, 0xfff, 0x3, 0x1, 0x7}, 0x5, 0x6, 0x436}}, @TCA_POLICE_RATE64={0xc}, @TCA_POLICE_RESULT={0x8, 0x5, 0x4}, @TCA_POLICE_TBF={0x3c, 0x1, {0xffffffff, 0x7, 0x3, 0x3ff, 0x64000000, {0x7f, 0x2, 0x8000, 0x8001, 0x2, 0x19}, {0x9, 0x3, 0x4, 0x85a, 0x0, 0x6656}, 0x5, 0x3ff, 0x81}}, @TCA_POLICE_AVRATE={0x8, 0x4, 0x7}, @TCA_POLICE_AVRATE={0x8, 0x4, 0x5}], [@TCA_POLICE_AVRATE={0x8, 0x4, 0x8}, @TCA_POLICE_RATE64={0xc, 0x8, 0x9}, @TCA_POLICE_AVRATE={0x8, 0x4, 0x6}, @TCA_POLICE_TBF={0x3c, 0x1, {0x4, 0x2, 0x80000001, 0x3, 0x6, {0x6, 0x2, 0x3ff, 0x0, 0x7fff, 0x5}, {0x4, 0x1, 0xf456, 0xffff, 0x35, 0x40}, 0xff, 0x8, 0x8}}], [@TCA_POLICE_RATE={0x404, 0x2, [0x2, 0x7fff, 0x5, 0x3, 0x4fb, 0x7ff, 0x0, 0x3ff, 0x7, 0xffff, 0x3, 0x1ff, 0xbf, 0x7fffffff, 0x8, 0xff, 0x8000, 0x80000001, 0x0, 0x2, 0x80, 0xfffffff9, 0x7, 0x8, 0x1000, 0x7, 0xa8a6, 0xfffffffb, 0x0, 0x0, 0x1c, 0xf11c, 0x101, 0x800, 0x1, 0x1, 0x7ff, 0x0, 0x7, 0x8, 0x0, 0x200, 0x6, 0x9b0c, 0x3, 0x2, 0x9, 0x9, 0x2, 0xafd, 0xffffffff, 0x3, 0x0, 0xf10, 0x2, 0x80000001, 0xa3bc, 0xfff, 0x2, 0x100, 0x3, 0x8, 0x1, 0xffff8c67, 0xbb6b, 0x3, 0x800, 0xfffffff9, 0x8, 0x34, 0xffffffff, 0x4, 0x40, 0x7, 0x0, 0x2, 0x2, 0x2, 0xfffffffe, 0x9, 0x5, 0x3f, 0x2, 0x6, 0x401, 0x8, 0x72, 0x5, 0x529, 0x4, 0x76d, 0xd47, 0x7, 0x8, 0x8001, 0x7, 0x5, 0x80000000, 0x81, 0xffff, 0x16b7, 0xd0, 0x8001, 0x5, 0xb5, 0x8001, 0x7, 0x5a, 0x10001, 0x3, 0x100, 0x0, 0x8000, 0x8, 0x8, 0x2, 0x1, 0x6, 0xffff, 0x1, 0xb8, 0x3f, 0x1, 0x3, 0x6, 0xa1a, 0x7, 0x6, 0x120e, 0xfffffff8, 0x2, 0x1, 0xf7, 0x2, 0x6, 0xfffffff8, 0x0, 0x7, 0x5, 0xfffffffc, 0x7ff, 0x9c, 0x110, 0x9, 0x562, 0x4e80, 0x0, 0x0, 0x9, 0x6, 0x5a0, 0x400, 0x30, 0xffff8fef, 0x8000, 0x800, 0x7ff, 0xffffffff, 0x3, 0x8, 0x2, 0x20, 0x63, 0x9, 0x8001, 0x4, 0x6, 0x4, 0x3e0000, 0x8001, 0xfffffffb, 0x0, 0x3f, 0x800, 0x1, 0x1487, 0x1ff, 0x5, 0xac80, 0x1, 0x74b7, 0x800000, 0x80000001, 0x80, 0x8, 0x8, 0xfffffffd, 0x400, 0x54, 0x0, 0x4, 0x200, 0x8, 0x5, 0x2, 0x5, 0x6, 0x7f, 0x3, 0x0, 
0x1000, 0x4, 0x33, 0x7f, 0x8, 0x974, 0x3, 0x1, 0x3, 0x6, 0x9, 0xab, 0x800, 0x100, 0x1, 0xf30, 0x10001, 0x2, 0x9, 0x0, 0x2, 0x5, 0x9, 0x401, 0x4, 0x8, 0x0, 0x180000, 0xf8, 0x10001, 0x1000, 0x7ff, 0x159, 0x3, 0x540fe27a, 0x8, 0x9, 0x8001, 0x4, 0x7, 0x4, 0x6, 0x4, 0x4, 0x3c, 0x7, 0x1000, 0x97000000, 0x101, 0x0, 0xffff8001, 0x2, 0x4, 0xffffffc1, 0x88ae, 0x4]}], [@TCA_POLICE_RATE={0x404, 0x2, [0x7, 0x831cd8b, 0x9, 0x2, 0x2, 0x52, 0x4, 0x3913, 0x1, 0x2, 0xffffff0a, 0x9, 0x1, 0x1, 0xffff0, 0x3a137682, 0xff, 0x9, 0x251fc07a, 0x4, 0x80, 0x605d, 0xff, 0x6, 0x0, 0xffff, 0xe1, 0x0, 0x0, 0x20, 0x0, 0xfffffffa, 0x5, 0xa7, 0x15, 0x101, 0x36d, 0x9, 0x1, 0xffffffff, 0x3, 0x5bb, 0x8000, 0x4, 0x400, 0x2d9, 0x6, 0xd0d, 0x2, 0x4, 0xfffffffc, 0x5, 0x5, 0x1, 0x63952b1b, 0xff, 0x3, 0x16a73061, 0x3, 0x2, 0x8, 0x9e, 0x0, 0xfff, 0x68, 0x7, 0x4, 0xe9b5, 0x80, 0x5, 0xffffff17, 0x1d0000, 0x1, 0x2, 0x2, 0x8, 0x7, 0x8000, 0xd00, 0x4b0f, 0xffff0001, 0x670, 0x8, 0x81, 0x4, 0x8, 0x7ff, 0x7ff, 0x6, 0x4, 0x20, 0x1, 0x1, 0x400, 0x0, 0xffff, 0x0, 0x7, 0x3, 0x9, 0x4, 0x8, 0x5, 0x9, 0x400, 0x0, 0x80000000, 0x9, 0x8, 0x8, 0x525f, 0x7fff, 0x7, 0x8, 0x6, 0x1, 0x6, 0xa0db, 0x3f, 0x4e07, 0x6, 0x100, 0x1, 0x6, 0xfff, 0xfffffffa, 0xf2, 0xf10, 0x7, 0x5, 0x7, 0x2, 0xc2, 0x3, 0xff, 0x0, 0xffffffff, 0x7, 0x800, 0x5, 0x3, 0xc7, 0x1000, 0x467, 0x2, 0x9, 0x4, 0x5463, 0x153, 0x3, 0x1000, 0x8, 0x9, 0xffffffff, 0x8, 0x3, 0x0, 0x8a0, 0x7fffffff, 0x8, 0x5, 0xe3, 0xdb, 0xf703, 0x86f, 0x3f, 0x1f, 0x7fffffff, 0x8, 0x1, 0x7, 0xffff0d5e, 0x5, 0x9, 0x81, 0x1, 0x22, 0x1, 0x5, 0xf, 0x10000, 0xfffffe01, 0x8000, 0x76, 0x6, 0x7, 0x5ec66bf2, 0x7fff, 0xfffffffc, 0x1f, 0x7, 0x97c, 0xa333, 0x0, 0x1ff, 0x101, 0xff6a, 0x0, 0x46, 0x9, 0x80, 0x10001, 0x3, 0x1, 0xf338, 0x4, 0x10001, 0x5, 0x9, 0x2, 0x7ff, 0x62b4, 0x9, 0x1, 0x7, 0x200, 0x3, 0x81, 0x1, 0x7f, 0xffffffff, 0x2, 0x3, 0x0, 0xffffff01, 0x8, 0x7, 0x9, 0x1, 0x20, 0x80000001, 0x6, 0x7, 0x4, 0xfffffffa, 0xb3, 0x1, 0x3, 0x1000, 0x9, 0xe17, 0x8, 0x5, 0x39, 0x6, 0x10001, 0x3e, 0x77f2, 0x9, 0xffffffff, 0x62, 0x7f, 0x8, 0x1ff, 0x6, 0xfffffff9]}, @TCA_POLICE_RESULT={0x8, 0x5, 0x80}]]}, {0xcd, 0x6, "71fdecd6a4f4fb667fc46b473ccf4401974ede14ce09a5bd8a789176a30fba411269897ba5a13f9152007d72838401e04488571a2fd25cb34693b76fc0d34200f3e3d0024c706661d4245eeecadcd51e3a3b5861a4ffbfb58f40134829ac521727d993cb95dc55a9fd053c1713597e41e5ae46a345990548d39c5b63ddd20f357ecc037dc6b6d09c94e3099066a7256df67b0a27383f86b39614d349a21a19bdcd577fa190b9fd9f4d941a0a276b236f22234fb5f67cc87f39a2652cc0b97152ba7ac752eeced6736c"}, {0xc, 0x7, {0x1}}, {0xc, 0x8, {0x1}}}}, @m_pedit={0xbc, 0x1a, 0x0, 0x0, {{0xa}, {0x3c, 0x2, 0x0, 0x1, [@TCA_PEDIT_KEYS_EX={0x18, 0x5, 0x0, 0x1, [{0x14, 0x6, 0x0, 0x1, [@TCA_PEDIT_KEY_EX_HTYPE={0x6, 0x1, 0x3}, @TCA_PEDIT_KEY_EX_HTYPE={0x6, 0x1, 0x3}]}]}, @TCA_PEDIT_KEYS_EX={0x20, 0x5, 0x0, 0x1, [{0x1c, 0x6, 0x0, 0x1, [@TCA_PEDIT_KEY_EX_HTYPE={0x6, 0x1, 0x1}, @TCA_PEDIT_KEY_EX_CMD={0x6, 0x2, 0x1}, @TCA_PEDIT_KEY_EX_CMD={0x6, 0x2, 0x1}]}]}]}, {0x55, 0x6, "e8b714955aff02beba9f979ee73dad360e4744faed71507eeea46ae5cc0a7feb948665465597cfc2ac08a2b3d689480cb6b28b706d3dfd131a1d82f8b0ccd0ad944b95f9755b2d835893f837a1782c3ae9"}, {0xc, 0x7, {0x1, 0x1}}, {0xc, 0x8, {0x2, 0x3}}}}, @m_xt={0x4ac, 0x13, 0x0, 0x0, {{0x7}, {0x3ac, 0x2, 0x0, 0x1, [@TCA_IPT_INDEX={0x8, 0x3, 0x5}, @TCA_IPT_TARG={0xf7, 0x6, {0x400, 'security\x00', 0x3f, 0x401, 
"6e355aabb9a96176f9f1298b7364f73780a79ab038faa790a6c2e06293087fc73c1493b99b9bb9b1c3cbe0fa3ac232992561b88403893acd340f47b53da247ceaf0840a9b1fa846ff434e17b5bf74b89a15a857bba848203c72a0f3bd6b5cd42aef06569b882542732a14f4c9f08485d92723ceb4a9236427174e9f0a6cd49a30590a03e77e2ba9d93aef89bc08712c0fed4109e169bbfd559116abb25790c7f721024fc0501cadc59c72aef7ebe7a88082aea0c581bfb52a56af0157cda37e481086b3eee2dfbf628717ea302"}}, @TCA_IPT_INDEX={0x8, 0x3, 0x20}, @TCA_IPT_TARG={0x11a, 0x6, {0x1, 'nat\x00', 0x4, 0x100, "95b759431547ace99f61372e00e4a99ac7bd9849d3f71efcd7b2c2162ffabdfa9c94b20125edd7eb65b749af0a13fb50f796d31a5bd05c912d017ae0330befa01b045b394103668188720e2694925636d0ebd9592bf8131ee4e68163739ba55708bbcfda3dd35579ee5d6d2d8b879aa121d56333f901a5d108abbf7e5bcdd7cec2e14c2f98a790e60f9f67250096f352a9a97ed83d01f725a1d47a27482334701a0c0ae97a84f0fff574572bbba548600f116fc9d5932f10a1d5555f6cf7f9a8685c8753c46dfd964d84f06f473e93346f6a5300ed4e7f82dc1bc704f45840795e8991e970f276a93b0247187820224b"}}, @TCA_IPT_TARG={0x9c, 0x6, {0x5, 'nat\x00', 0x42, 0x3, "b5c258996c0dec54f66b2adbb42bd42f61831ce342304b84aeb3710348536ce78c413e9a204ca951e0badc055c59963dcad0a5ede5df17e54db96eb5cb87c1fa2ba6ad22c8f646cf96c5e04da06863edf81c228fea3fc7b2ed622344c750ef8f3e2d6401a8d7cb1e204b3c893d10987f0f23"}}, @TCA_IPT_INDEX={0x8, 0x3, 0x1b50}, @TCA_IPT_TABLE={0x24, 0x1, 'filter\x00'}, @TCA_IPT_TARG={0xbb, 0x6, {0x4, 'filter\x00', 0x0, 0x200, "2e14dc42a439f686faafdca2af378fd9721362dab9f2bb2da2fa7ae34b609d21a3575b9a10235a0ee99524d91d8a7c32cf5e369f364429d6b618a1ab2211968b323cb8c8e573293aca15125d3979de48e647a72c967a1030ed8cf6a0dbc770bce2d2867008f80de2ca1af384fb33a02f9f3959e42c758a9677b78d35f23438dcd3b5e5b3e3de15e1887c7f6f88ac206101"}}]}, {0xd9, 0x6, "b93d34c391cb4d535592434f9a7c9d68135fe32abd27380f1d64ca820a986cbdff0300df727037654e1f2362e8154d8d5a535190cfe9627aabfb6c17be6feb67f0ef7b31a295c6bbb4fd3eae2ae784e4c66cfedf7b1a050aa4d93368b43825dfdf46906f7130dc4b70337be98cabade89e90eee6be4717611aee366e9c4d156ae1303f164bdde6e22de23d188c48351b0068a76fd3fb3e4278ba8f9412c92c179554a234ea624bf4b0aa6637ddd410e55680c7acb904e934dcbb9005e66b4fdb9f8f3aa55dad4f63f25190a71a6a102661e3225b80"}, {0xc, 0x7, {0x0, 0x1}}, {0xc, 0x8, {0x3, 0x1}}}}, @m_nat={0x174, 0x1f, 0x0, 0x0, {{0x8}, {0x7c, 0x2, 0x0, 0x1, [@TCA_NAT_PARMS={0x28, 0x1, {{0x2, 0x5, 0x3, 0x0, 0x7e}, @loopback, @rand_addr=0x64010101, 0xffffff00}}, @TCA_NAT_PARMS={0x28, 0x1, {{0x1, 0x53e0b663, 0x3, 0x2, 0xa4}, @local, @dev={0xac, 0x14, 0x14, 0x2c}, 0xff000000}}, @TCA_NAT_PARMS={0x28, 0x1, {{0x200, 0x80, 0x3, 0x9, 0x7}, @empty, @rand_addr=0x64010100, 0xff, 0x1}}]}, {0xd3, 0x6, "c524e609445b859e6a27c4a7f8c7d1bae3701acd22365933aa623d0092aea7abb88d4a33fd111e60cafc6d66feb7413f7134371f62c6cd0b8263470d0ba0452e82f9a1b328278e5273a0aa10b3a8788ddd6d25fca7cf9bf7f1d271293885a7ca0e13bbfba90406ee5ac96c28add0007dfc422ad3e166b7b14ed6a7b4f4de78dc1d1da095e9487467ad4c761816c59cb0b6baced621dd58043ebab3f405bc4b8aa546a89a1d88430db16418a0f35019c642a23d5521ff4233957f2f7cb469d94ea1922947cb13da09f514e2d0c6b56d"}, {0xc, 0x7, {0x1}}, {0xc, 0x8, {0x3}}}}, @m_skbedit={0x6c, 0x1, 0x0, 0x0, {{0xc}, {0x3c, 0x2, 0x0, 0x1, [@TCA_SKBEDIT_PARMS={0x18, 0x2, {0x4, 0x5, 0x2, 0x80000000, 0x1}}, @TCA_SKBEDIT_PARMS={0x18, 0x2, {0x1, 0x5, 0x2, 0x3, 0x80000001}}, @TCA_SKBEDIT_MARK={0x8, 0x5, 0x6}]}, {0x7, 0x6, "e597cb"}, {0xc, 0x7, {0x1, 0x1}}, {0xc, 0x8, {0x1, 0x3}}}}]}, {0x12b8, 0x1, [@m_nat={0xd0, 0xa, 0x0, 0x0, {{0x8}, {0x7c, 0x2, 0x0, 0x1, [@TCA_NAT_PARMS={0x28, 0x1, {{0xffff, 0xfffffffd, 0x4, 0x5, 0x5}, @rand_addr=0x64010100, @broadcast, 
0xffffff00, 0x1}}, @TCA_NAT_PARMS={0x28, 0x1, {{0x8, 0x1, 0x7, 0x6, 0x80000001}, @multicast2, @remote, 0xff000000}}, @TCA_NAT_PARMS={0x28, 0x1, {{0xfffffcc4, 0x8, 0x4, 0x4a, 0x7ff}, @local, @dev={0xac, 0x14, 0x14, 0x2a}, 0xffffffff}}]}, {0x2d, 0x6, "bc6b8bf5aab2e78a7ad21549aaaa47e86907c349180305192f884edef76ddaef91fb29959039f9d932"}, {0xc, 0x7, {0x1}}, {0xc, 0x8, {0x3, 0x4}}}}, @m_bpf={0x1090, 0x12, 0x0, 0x0, {{0x8}, {0x68, 0x2, 0x0, 0x1, [@TCA_ACT_BPF_OPS={0x4c, 0x4, [{0x7fff, 0x7f, 0x6, 0x7fffffff}, {0x7016, 0x50, 0x5, 0x7}, {0x20, 0x7, 0x6, 0x3}, {0x0, 0xf7, 0x40, 0x7}, {0x3, 0x24, 0x9, 0x9}, {0x2, 0x7f, 0x2f, 0x3f}, {0xa77d, 0x40, 0x3f, 0x9}, {0x8, 0x80, 0x3f, 0x1992}, {0x8ae1, 0x3, 0x39, 0x4f9}]}, @TCA_ACT_BPF_PARMS={0x18, 0x2, {0x7e0, 0x9, 0x6, 0x5, 0x7fffffff}}]}, {0x1004, 0x6, "d5c397cd6a05dfb9ee416af74a320149463645396597a2b5cd06857b94e64883c377191a7447f76206cbf62b17395679d6455a77d466b611090105e10df94939e349296f8b25ecdd8b37b4e38dfd6c40af43b5675aca6aebfcdad96640c38eecea90c6f401e9b1ea907b505fca515af7b9dcc62128b5f5a2ee0b923e73becb7b974cd10d12ac9313cfd146b4eef21c3c01dd074411543a973f53954abca2895e8d560147b79b9046a925bdd3d40ef8226dab1b40ea5c9e9988374e04011a06b012c41a426c2977702f6a3d8dfc2afa1fdda03e73f2622958151834764b2f447f73b9fe97f131a96d248594719da44dd61c2fb5a7f627c7042ff2de0cd46a0a8b15616c560f13cb83403735eb433c5e17b8d8c8b1fdf254beb05b1b5988465b96da802f1f6b32346ef1a83ba4ac4088166eed1a6dc19f4f069cfa1845641c9da70f7a684b46b6a8c4eaf1888220bf7a6a830d2deff703e71afa1e44903393834c75c20a296d822f70cbcb20d4683789eef42cbddef50b652ec5a24645b35e8fd5b9cda2b98f85bd786fececa8d18b3cd7d74431efaf42c08a0f98b0f1ea5ccd990f11b81a61a8cc0173b6bcf2449cb7530b3a8faceefb5161057b29db366cfb1b02c0bcf834b8ee7c3d5a22e137c1ad7d17261f40bcf15565b27c864d1f388c264c89c1e5f4c0e70f396afcb190c57c8a83e42d6253bb339feefb786696b7679c0846c55c86097daaa948a9cc496be08a95a2e51a96f8c4efd2195102f8f020d2b56648309aba5b2494966f0b096961412506c02616fabc1b523e6e5138d0629ddaa0ab3af3b3ded3c27f1317c4fb864c2dda0fa800814b36b1edcbbd1dee5f502d1bdb98f4d7bcd65b65158fc235ddc982bbfb70c10f38fc765146b4bab0ba4d650d463125ec6d6cfabbce5e7e0ad7661b1344771f4696c67a75cdf9a11bc4202100b3bd149b1f4cc1c47b3eef569d7cdcc57913e5cc93c9dc4d1cfcb5d288a1e3d22f9095a0e5e1dc56daf42d9163b5f7c8e1e5ae295add87d098959f033d44cae83332ed4f042536f930753f3b758e0454cdd23a2e582763cfb5b95319230ae0842ce4fde301dcc5acce4d748fd9d5472486b0b925a305e2f74caae64f1cf34fafe551430e68f45a10a64e2cf30ffd72f3da3a92c412703dc90dba3a4ee9d5fa249eb258c068e0a0e6af0bf686a67ecd9048a262bbb70ec5a08be882ce869bae99311ebccb048b6de6d80efa7c061728f6ccd88b48956d3176be9ed51eccb849b2da66a2f57e29a5b2f5f2f543c7283b807b08d075f309671301337a1bb2e6e0845f8b2dc80a81b702f7eeb81c85482e4ef0abc0fcb61a6a0554afd272a3133fe6efe695143c5f94fc2185333fc17dcda0b1249cfbceaa84229c8e52bd671c829982716a6ba2cac973786a20dcdb1489fcd7e375aeda14bee7ea1ad20dd485a0bc6e2f7b0730ccdd1fc1f9560a6d2f446f311adf4e440757ef8805b970aadb96041dba69a7a1e16004d9846c94cd33196ff0a14299731b70e15dc686b91a5c8733d9cefb7ad44cdb397a9e7fb7d86db92786f16d8b6056dc194d46c25d23afc493edceace06560576f6cd45462591c3e93b4c71680bbd6a5bee9d1ff1a4ebb7e71266793e66d2a03a182c808375ff1be0ad8f0615e11d06713042ec257e0a2c9e6bb6fbbaf48b5887a2ac52e0068dfc1ef2d5f0c79febb6e83c014dc176cb36409e7e4837cd2117f7f9517e4af3c26ce3a56b0f6cd10b3b1dab1dc35ee8c268b14d2d7110968c96653801a992a238230c611a1b700311c3f976c1a85e5fcfc153db9ec7f95fa9bd83a25d971708ab14a19cf53fb0ed00c19fa1b77ccdeab51027300c7bac430215e32e3b5961d26a1a477e9cbe4c38428b1f89a7987a9d9971c4da1def147beaa14a0579d01fc471eb0550513fd53a1371b90f3cdb7a3c1b8f2e60c
84d56f672502ab66c2d69c326bd34b44a6f450e64858a41953247056a8cc23e4f7d67f7515e64c0c16c40e18c0ae58abb92ba557aaf3115bbd16f10c5a165cbd31b1c93fb2b7171efab569d6ad3211891d2a73bd0e551363ad93b59c1679f20ee92b10056ca24792ba91e22f95a931e2722d19e904883e1e6a35b16786d8a9a1f8a574f7b666a6cda61ffd17c7a7061da34d0412cfdc6786fb354b1004068f523e6ac282e91f368525370a2e2b4b05e879e935b064a12de9f84c1583c99839ff6567386e64093a7f4fb68d5b79f2b4979cc2896fb544fac13673fd39a83b27e95337868d4e9b2a62ba5662be6293f430178ccb5b7c92ffbb2aaad681e7db1e85811a2e207e56dc8949d725d206282300ebd31a77bf158bb020328178c86ff005ad4e9531464c322886e8f1a47017bdc42790499ae5f7431293cfde6ba0f48826a73300f132e46dafdb74ef5fc6829c4e2abc111f1bcb055ee157d9189b4120e035fe90d3157dc2e8b710ecd1daa92dba256ae6e7aa2b8b1991d5df9dad07d09875599a43eda50b0726a195a32690b19a0257b7455fc96ad590fe009fe0ae6275d95295ba9b4a072cb5fb9e2ad2f433ef5fb7687ef16ddf4b613da4be923432ea6cf6bb472fe43644cea7893b5203efeaf63278ec1f3fba16b53e42b4e68ec48ba8a62a53d508c5145a6097806a6f4cc9a15469a3cde50844e8a0525ed3a04de0afad876733b6b8d82d9a3488cc01ab72808feb3a3979725355288376d57929eabbb0043bd6f1f841473d59221d4059bcea70d3bde23fe6abaf30c189c0bdb99bb56e4ff4af854ee1149017fd1bf5fa9b24fbd4370cc2d73b677f36a751ac8e21239a243622c732c7357e8a44ece685c6d585e4d82cb997160c2766949dfd8e2590a4dffccef738a2dda0eda04c06b389c41ab04536fe00c81d18adf0b9834ee24bcb9669fd77e8648bf9d5add2f237809f4c10a410afadb4bf6a98b4a4c5021d1a6fbcd52039ff9adc446c3a67c317a41785dfbfcf6e02a17a7f0f0f14fba4e8906a292e52c60c6136b2550a5bedab67d9adb9fa2f3c8fc04aa5719b87fac18b3363d753c3046a2e7bf2ce7ea9d989c6c9e55deb0861a0b025421efe20060110d9b9fe4f4473f8c20ae283fd688911c6ad451fb980d0b22eb44577015e452aadf471fe21590da1a10111ee241db4eed02881cacb87ae40788beb11ffd83a7b7bb6521b538ee94b20128c513cd074ad79f43d11e34cad7752fab6f653b4ddec32156ab72a255ee11efffdccd6ab5bc2b5b6da3fc4f31209cb71bc0b8d94d52d5abdc40ce329a4aedc3d0d77747255d1789f5ceab23402d5c9bc641270eef2d36d7f30202521d39764c5ac3c725ed17f278700576700ac52906fbce21a41385c8cfeb2c8cc65da752842102236a0c695a63f47b088f7bd9c9c60365136b8be6384b96e675444376844bce8d0438fbbe921429ce1c8c0a34fcb6b20b161b3dbc3f5bc852c8dcf3fa56e58ae7e70010ff0f74251a61e8eff8aad004d976a8b2c15ba6cd898f5c245e50c8f4bc5e170ab0ecb015e14cb06625189425470c2c56a959e7eafe8fe823f876d13c073dd0183acfb0ba18d54cc013da148960227b6f95f682af879e7d544eebe8f53d914940d9ad6a2e8289a690b0a914ec0878c9465f7782405cfee4a42f31e5c89ba27838f71515692fc18a500df155b74b4c8200077f513c3930d7966c90b4747a8fe5774b9a8f363084cdeb9e9a48c2b40da6fecf610cb66fd255e384e2f315d5d2f20f4c120a7aef7ffcb66a39340f000dbb9eea8e071d6b0adffe022279535e42329f03f03d0ca127804f1eb28af549d740891d80490ff710201c03d2c0fd75a1824ca80db7a9c45fcfbd1e020ee68be926ca6f780626c80f9e49b60c05f5557f2583794ee6825d2c831520530b200e74ddc3236422fd8b1d8cf7090cd7c10d21a9b4e315e83dc9bf15f23f0acb032be93e3029485b509f1d1c8c4c34784cc936ab1c8bf0dae4ab2f9b88773a30306543759bfba8e47a10981a70bad0ca33ce5a44183351ff9efd1569c8abb7e096d899361fe0a27a471e58a01f120f2a1adb0853dcd7a6b3224a7dfbbf1b9c3c13e9b5cdc43a6ce13aa8e2eabc0d7e9a25802ec3532a2adeb0b2af0386804c0ec3cebf1044d4e7fe4b39120a37942e0a0b76472318b67483d5c89e5afd5a9df8ba8a5d216fd5dbe33f1ef344b9c56b1d4ebc6fb4688b96005e86772af5350ace9f2f19b5180577e8e4369aad771fa2330109e5e58024d37c712f577b68037c1f6b18c10b1bbbb4e5dd07a16ff9a97c21983437709c65379de11a1727b1f3cfbed2d7286f55e43ede4b0696a310491485b2ea2bfa3d4f9d6d75b2780d05372c3941b371f97c6822ce5473f753e9e9402eec9e8c5e2678bbced5c6c10e3f995ef6013feaa0ed621b4cfe2ed549623f8ce3e7d4b8a160efa15c931c6b28c3e926988a4d4677f9d36a464ac0c7c24aab048169a52306a761a49b73
b44f83ab65126aa29e597512caac7b2baf974322b87604aa6636e81759fa411e0cc3721828f9d5455808573a8807a0bd06a034c79e1f12734a6fd47cfa650213c3dcdee3540e3dc15c536e83d9984c6020ec6aafe8382094621128f13a1ff2d854234107a043a1431ec584c01e3bf0d78bff558d9a8cdf6dad5b500f0be8c1870487e08ebc34616b6f84c09e2cae4795014a232b84c4cd247469650763abe8a29ac653c122580b119c10f669a545b4357540c1b3114d219ec0d8f562bb5eb51b70f71c744ce4703d617c4f818a9ba959f353c8174c61200332d9af94035a5906ad34c7786d0a900fd102f8cab93b01fbb038f774f8616351c0e862f3eaae5ef17a5c503974db34f6bd2abef240562eac1bcec04e7c2f4628fcd271a874ca4b6831196ffedaa91ced339c46d8b9d4316f66bcd284274314b05dabd53ed14b39eb3af26f4053ca22fc3918340ed9eb9fbb06ae98ec61ee23cdf01fa688bea848ff1f3462faebf6fbef438255fde9286b2bea6e6ae525aa5f279a31ee3b349201c975d6fcb01b833759cadebb35eaef17d07a5931b5e9c9731ddb23ed5c242cfcde0c2c4f68364353ec4e29817a8f5c801f3d383d7373555914f771a11b3d11b6e38cf0b8c53f20a84a6fcbf4a127912bbcccd0f0c5e07e8ca464d208b4c9288cc3611b05bbd6fb44072b923f83d2d1955fad5055109cf2bd8a3bcb30386816b0b473cd4e2e9131a316d3ba64cb26b8f2c2f8ce463989a3c7d3831b4137be1f2bf57d1a0e599d72208613adcfd3c3b1c86a4d7a7baf8bce90e195d50d368a16675dda9f70cbbabfdd5206d595a8a92795fab2fd3d0a54e533d82f3fbf95b3655f440a8a49fdfecc80c2176532713539c3f89652571b60f09162163b99afe83a3677ef6c8683ef0affa72fe16c7632d55f0869aa9e8acf3df681c99f4bb59afd02dbbd593c95b4179adf6149b7c6e4708d2dba66dc279056e7b5fd672abb9a7224e4428e4fddc714a867da8c41cf572df942d2d27e43420e9e5595cfdaf089e9283034ad5facfa41d9ec293560255300bd9e1d84468a70942fc87090a9a33fbb843026012177789814499b3234e58e2c5c360535806f7bc2d063ffc92d1029ebd463ba0542931df9e1294fd29cdbd367691f4935215abacd75635fc331c33e1cc875af14f2e8aa2bde61e49eafb6618c122786c204792d2e11ae4a69d98046f82900570336b1e599ebf102de320febd2117b12cac8c61d34f58d94890db73b70a20501f65131c47dddd50e6d1453741999ad24c1e0add6f3cb5d42f5204db16cc08a0ac8faf244f23a3f99fdae2"}, {0xc, 0x7, {0x1, 0x1}}, {0xc, 0x8, {0x1, 0x3}}}}, @m_ipt={0x154, 0xa, 0x0, 0x0, {{0x8}, {0x30, 0x2, 0x0, 0x1, [@TCA_IPT_HOOK={0x8, 0x2, 0x1}, @TCA_IPT_TABLE={0x24, 0x1, 'filter\x00'}]}, {0xfe, 0x6, "1c99289766349766d6fb510168184525fdeb9ca5cfc8cddd4807de1f21b28ffbfdab168537494ccc1ad0bcb4c5d61f7450fd926be0abf90240d303fe47a44b155989144aacb23019a9e560b58875604502aca3a4ee10845fed4a1059f4704a4c37c65c5f4358feb67a61a3d24d717dd224085e4d4f095aad044e568af35003949bb11de9d3cc065ad6d6f0c5415f01f17c78e85211466384dcb192b646cd178f3eaef8f8da5ed00555f73aa1b939e79eb77743ca59dab6da445c222e8adc9654767838747d34fd788da596f4993519af07eb283c444346af4a9fce0218907a0332605feb938b2b8b00e5c66d1f34d3e1a6d8d7bd931527087bd7"}, {0xc, 0x7, {0x1}}, {0xc}}}]}, {0x504, 0x1, [@m_csum={0x130, 0x7, 0x0, 0x0, {{0x9}, {0x100, 0x2, 0x0, 0x1, [@TCA_CSUM_PARMS={0x1c, 0x1, {{0x0, 0x7fffffff, 0xffffffffffffffff, 0x80000001, 0x3}, 0x3a}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x4, 0x3, 0xffffffffffffffff, 0x400, 0x6}, 0x28}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0xfffffffd, 0x2, 0xffffffffffffffff, 0x7, 0x408}, 0x20}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x80, 0x1ab, 0x4, 0x1, 0xd78}, 0x2f}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x3, 0x2, 0x5, 0x7fff, 0x5}, 0x17}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x5fd5, 0x1000000, 0x4, 0x7, 0x43}, 0x2d}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x8, 0x2, 0x1, 0x0, 0x7fffffff}, 0x68}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x2, 0x8, 0x0, 0x7ff, 0x7f}, 0x4a}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x4fc7, 0x2, 0x701d22f88279bd7c, 0xd811, 0xfdf0}, 0x76}}]}, {0x7, 0x6, "a457bb"}, {0xc}, {0xc, 0x8, {0x0, 0x1}}}}, @m_skbmod={0x17c, 0xf, 0x0, 0x0, {{0xb}, {0x5c, 0x2, 0x0, 0x1, [@TCA_SKBMOD_ETYPE={0x6, 0x5, 0x6}, 
@TCA_SKBMOD_ETYPE={0x6, 0x5, 0x400}, @TCA_SKBMOD_ETYPE={0x6}, @TCA_SKBMOD_ETYPE={0x6, 0x5, 0x4800}, @TCA_SKBMOD_SMAC={0xa, 0x4, @link_local={0x1, 0x80, 0xc2, 0x0, 0x0, 0xe}}, @TCA_SKBMOD_ETYPE={0x6, 0x5, 0x817d}, @TCA_SKBMOD_PARMS={0x24, 0x2, {{0x5, 0x1, 0x7, 0x1, 0xfffffffe}, 0xc}}]}, {0xf7, 0x6, "469adb2e2cf41b85321c7b5a23cfb528ffa3f286d09af62de4c0437ed17e4bc9c0df7e91f1dd947ceafd1b3fd4482281e6b6beeb2e830983a1d8fcc1165bf74a9337c5d61d8de7fc97a7a88ff350763277657a4d4974eabf8c6afb00c159e410f71cbf97f49ef1d6ec8595a0c901694f158703dea8683b7337e77c54fc95ecf6fa59f7ecb82e4bfa7de9c4f19d7d17cc0aac8b5f4f27346cac83a14371745ada93c9389893e54022226ce33f522e2f30a08bb8f1e2d086a3a41dda677a7b90d8c10284d939365e627baa0f229165b37855d9075ec9a3215620d0e26e9c81a5168e03546833ea5e89fb0ed7d07a34f44df43233"}, {0xc, 0x7, {0x0, 0x1}}, {0xc, 0x8, {0x3, 0x3}}}}, @m_xt={0x254, 0x1, 0x0, 0x0, {{0x7}, {0x188, 0x2, 0x0, 0x1, [@TCA_IPT_HOOK={0x8, 0x2, 0x2}, @TCA_IPT_HOOK={0x8, 0x2, 0x3}, @TCA_IPT_TARG={0xc1, 0x6, {0x1ff, 'nat\x00', 0x1, 0x7c, "a36265120b7e186ca73fc759df8344fc2da4c0e16638b260c37046bda260fe3105d6fa10b57dae25e049be42891968806e38241feb33b5ba929cfc8955b9b9e60bd0826593b77951e9dccf0fb6d938c12d6868dbc6ab182fd31f0694b458c7ce30ce63bcb51c3d90c82cd09197ab7fcb15fe2d30fd649858f9435da9dae330e353d73cdbb61e05ffbffd05c881b3e94dd4e1ed0dd0fce0"}}, @TCA_IPT_TARG={0x65, 0x6, {0x6, 'mangle\x00', 0x1, 0x2, "500437c4c1b7f744d5efe01b98bae1391531f0a2d3e85606b333f4bb365f02efecf2709598cd6ec282a516b504c7da9e4d30e4cf60aa9b9af4d7da"}}, @TCA_IPT_TABLE={0x24, 0x1, 'security\x00'}, @TCA_IPT_TABLE={0x24, 0x1, 'security\x00'}]}, {0xa6, 0x6, "e1d0c5568381ce0be90c8f727e47e1c9f19320379a4aabc4987eea4427f9729077f4ef598275cbcdc89aedc6277c313141bc107d81df8f22e26a77fab298edf9296c402840d753aff241a7c1c33077d1ee94471d8d7f900399b87efd3b6008453a5202861c54850aa152286831a718f02d04168bd0185fc3cebf3aa93bb1dd5ac99520a71faf5e747d3aa068e5e1ec548e3054a433da1c9c822337599fe2f79c3c15"}, {0xc, 0x7, {0x1}}, {0xc, 0x8, {0x1, 0x2}}}}]}, {0x16b4, 0x1, [@m_connmark={0xe8, 0xc, 0x0, 0x0, {{0xd}, {0x20, 0x2, 0x0, 0x1, [@TCA_CONNMARK_PARMS={0x1c, 0x1, {{0x4, 0x80000001, 0x5, 0x1, 0x4}, 0x8001}}]}, {0x99, 0x6, "03aeab77c6b6512230bfb4a4ac9b43e3dcf0d450876d33959dc776e07a78196e3269bac6d8589a252afcf9539f70af09178c9c32b5d5968d9a4b0573f10b524b8e4cd19caf58fd02402025b05fd8e46fcd1c94c85b0dc75fe271aad7cfb111e47a70f5dfffdf94b6992422f368abc3fd15a707470c933185cb19812aca83b9611a926a8937eba6778a011e35bddf74b85861922719"}, {0xc, 0x7, {0x1}}, {0xc, 0x8, {0x1, 0x4}}}}, @m_ipt={0x23c, 0x1e, 0x0, 0x0, {{0x8}, {0x174, 0x2, 0x0, 0x1, [@TCA_IPT_TARG={0x10f, 0x6, {0x8, 'mangle\x00', 0x81, 0x3963, "fdd7875d02135264942e2e13ceb04c4abfc1c38223b7750262be842e76f650d9a8d6814ee8c83b01075659f5806c9bf17132c3f7898e32c08265853f9a5f37802c4d62eea01ac9c7458c96c094c6c161f3541fcc90630c5422c67169acb00e8bff2a268bde93a0dd5589a7f9cf35f5839961cc5cbf769820644ae0b07d94e45f23902b5b3ae7731f162f0144e8bb7153f0ebf787387e3cfcecb4fb96e749c5b849e0dd4bc4ac4647692857192301a035d92651455bbb9359345f10634e10d8405ee8049471621d9735138ded1c7fa59de5f150da3606266fd3e8fe91f90e032e3f0eaceb73"}}, @TCA_IPT_TABLE={0x24, 0x1, 'nat\x00'}, @TCA_IPT_INDEX={0x8, 0x3, 0xfffffffe}, @TCA_IPT_INDEX={0x8}, @TCA_IPT_TABLE={0x24, 0x1, 'raw\x00'}, @TCA_IPT_INDEX={0x8, 0x3, 0x400}]}, {0xa1, 0x6, 
"ba7efa3f282338c36e76b0cac6a7041260e6223343731221c4739f4de0c089b36caded50142fdde413819c12a936da64fc0c8d9cd8ecbc9a1a501aa5a6f1038a90793b3c2c440c0b86651fd1b5864446dca51e15d0fd81734bbf6bf6b9322003d8d102a54af20085485c9d7bd138667551a0ad46e3ccb501125791f70dfece56fae62212400d45412bfa98882c215c077793b9ed54d28986000009d854"}, {0xc, 0x7, {0x1, 0x1}}, {0xc, 0x8, {0x1, 0x2}}}}, @m_mpls={0xf8, 0x14, 0x0, 0x0, {{0x9}, {0x34, 0x2, 0x0, 0x1, [@TCA_MPLS_BOS={0x5, 0x8, 0x1}, @TCA_MPLS_TC={0x5, 0x6, 0x5}, @TCA_MPLS_LABEL={0x8, 0x5, 0xb315}, @TCA_MPLS_LABEL={0x8, 0x5, 0x30021}, @TCA_MPLS_LABEL={0x8, 0x5, 0x8e9ea}, @TCA_MPLS_LABEL={0x8, 0x5, 0xccd5b}]}, {0x99, 0x6, "d229cad884cae9affc3174e07f4c784fca055c4bf8d0398be5a871d4e5365bafbe988159fd8ae79523f2338ba4c88ae8938d8da1c1723a1e54156c1b754807c8005c931732bb05a2c2b5bccf6109a252c0993da752485a7bc9c8396c426b2a6dda2fdf0766313b40acb6c878394c31449a01ebda859adb01c0c15ae91f757cd307f7c6417699fbefacb1186354412a630b4387515a"}, {0xc, 0x7, {0x1}}, {0xc, 0x8, {0x2, 0x3}}}}, @m_xt={0x184, 0x2, 0x0, 0x0, {{0x7}, {0x12c, 0x2, 0x0, 0x1, [@TCA_IPT_HOOK={0x8, 0x2, 0x2}, @TCA_IPT_TABLE={0x24, 0x1, 'mangle\x00'}, @TCA_IPT_TARG={0xb1, 0x6, {0x1ff, 'mangle\x00', 0x7f, 0x9, "9e56e3ac8ddcda53596e90ecc48c78d65eddcdd5ace92ae5e2b159c3087371890895391f0002ae841d5a894346f1f3ecedfb1ff4a73509e7ff0617ca4f0f55c9ba55022f2cafb884ca418208e2d2b6b0da84e1b1cf1fdc132088a8eeeb33a00729548079410f967c5915754f3f11ad6d85365cdc17dc726f47a66736fb7708ccc3004c8c5b525c"}}, @TCA_IPT_TABLE={0x24, 0x1, 'security\x00'}, @TCA_IPT_TABLE={0x24, 0x1, 'security\x00'}]}, {0x32, 0x6, "655222f21dc8c3c3eae5ba5ba36a4a84581afb66b18d7a51e0154c419483f86ce93812b0483f728d2fb8f059455d"}, {0xc, 0x7, {0x1, 0x1}}, {0xc, 0x8, {0x3}}}}, @m_csum={0x1110, 0x5, 0x0, 0x0, {{0x9}, {0xe4, 0x2, 0x0, 0x1, [@TCA_CSUM_PARMS={0x1c, 0x1, {{0x0, 0x6, 0xffffffffffffffff, 0x100, 0xc1}, 0x1000}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x7, 0xffff, 0xffffffffffffffff, 0xff, 0xce}, 0x70}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x7, 0xd3, 0x3, 0x7, 0x9}, 0x61}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x6, 0x5, 0x2, 0x4, 0x3}, 0x13}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x67e7077e, 0x2, 0x7, 0x795, 0x7ff}, 0x4b}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x8, 0x2, 0x6, 0xdfe, 0x7fffffff}, 0x1c}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x51f, 0x9, 0x7, 0x2, 0x92}, 0x3b}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x7, 0x0, 0x1, 0x0, 0xb4}, 0x7}}]}, {0x1004, 0x6, 
"a4f6744c97e0549953a6b9668c2ee5bdbf8f6d8eb58f0b6f506dfd7f25f826d6c616ee0326543d40357ca355c89495d320bb884bf4e3a5036ca4301aba26ee893e14bb0a33047f593a0e715215ab31488849a15db369778d71b15dbd6da53524468c5a59dbc31f3714a742050e8e42eebc527384d37f9333a1cd74d02419861f8d5f4fff43cdb70694cde1c976e9ec4312de161f8872bb87ea486ccacd528b495c13a6f054cbb703dc38594629fee566736739aff2bd5d82e7c01d3bd4e8f1e5c6c68baf7d585d0da2aaccf3a77e87ee8f7b01e078860b3f562dc4b16c19a69a01b2b23faca1d1418ddb5e029faf004d2e08ecb26f2f569853214465a6129be99d0dbaecac718a38aa6e0a4f026f8dfcf3df96d356369abc2b3c0d1465757d3a9ee20fab22069f4a846990c4c8779a209c9c857c97b367448323d0ac3ed69cbca5ebb86799865420ebe3e0828cf3daeee8fed764d0bf1466fa87c008ac4c0157493de93febf8c6991ca2f9242b2ee5adbd660621b940a5426cd983120e69d79f993b71aa40ac0655b262c5924b3cce92445f72537cee84a81c5d9f4609ca8649e94f9dd8610dfc6cdf90ebc2f09ccf5c4eb3bf3c60d35b94045e3e05b6610b5e551689e923494b0a6f0ff64b8e536e26e0458f1757ed71d1d9f6d176ce3c23e0cf52275cbf8d010c04b9c41bc4ada9f73fa57b22a452c38ed7f659de11d8f281f5bdb9887e7a36e84f3cd33a39c4159e8b67d0b7a64fc8b0ac831922256384b61dbf0adb4b13955a97a4967673c003a2bc269d573707762e6d12d1e81ff3b1cc7242c1072a979353661ca4a687fd43e16983a6eb30b298427770c8c337c0e87ee45d4b6f442558670db3ed4d379109c1e6b7cb9ecff66816061a221f1c5104abc4ac6a2c76f680dbbf102f4ba7a33df85b2187658ee5af73f9e37d7fb93ac50da6ff6aa558fb48beb68eafb5f23a120e938aad5042d99b6d1d3c4e586825c115e9a90c9aff7f0b6f3c46634c93f84fc8b1c89a5c69b77cedd8e3e01a641472fc863045d3e50ef3059ebbc04760bb7f35bee571afcf7a14ec547184916d4ff6050be595d0ee5b569fc7313d83dcf73e65e92f6dac45dbfd1da86c78a7232aaadb6416164e10045c6b5a7977c0a1ac5213311f9f82625652eb875c55028f1a5b05556e6649f809490309cc1571972040e0d45ff53137802fe774ab814bb823e6e0ef236f535cbbea42b082bfca32c5bcdc5e7b30a2a5dee183a31ac852c2b4cba8a3f562eea46f40ec7f97deeb97b671a148492801e0dd3b04ff8fbc6d22435fe2e3148920ece2069b56da6509739328ad0b7e633a646c62424576965b01f4925b4c7cca98480872b81017617a874dd37f7cefcbc6dba5c27748bf5971b66b98e9032273db086f0c2e4f9d752c5633a6b75bf983c1c169b710633e02c911e60468855bbed3bdcce6f37d4033752e3cac45b9ca22f6a449ef5afe0536f3e4e855937c721891f591e66c2d9fb807fb91518b20f42e6e3b81d535612bf046fdc48c2165597b5e0f636357a54b771aaf38119aaad0900a9cbb9721fa63aa274989a1369e8ee873ab6d687f2447e5ba10cec17247bfdb15a2256b97c6caa5e52eb77bda9b68577bb53a3879283a913b13c65788ce414d9af61a9daeff517ffede4882e4e30a5153997431a121a436436a6525c8668affb9d284257d0b1e9d9dca5c120d0b57eda860bdd370f533c3ceed0ab57812181a9a8eb11f79d233367dcb8cf9982be61c41bd5c808287780b0015f463f40a8a8801c0b8e30511cd1d2f661c414db7c59edcbd09286d9e13f3f40424f598f332ab01ac583a0430cb989c9bf05195eec6652175afb92ba16152caec34b0075c16cf8248717ebf4f0db91c1c6d8ed8bb65c7147d2f8774f1227351027cf94a07d7094bd92450b7c82adde469a63e1c8040a07e66f35b5f5237e7871e87ef8a650b3a2e4fa88a11ec9cef08e5baa926885e0dec1afe90c6103e8a469181cbadf93aebb9d20726c0d57e3d21556ecbacf6348b45a43f1da77b14c2171e1f285744d83b88d3c85ba54ca3efef66514514ecd5dd5c75bcc7d500cbe9d062863870883cf992864141778bbd868683b8b2596e7507bf09cf9e9e10b11ecd333d2d42e254e8fc1ff860801143e52d4475d6460994ab74e4c75339df77d691c28fca4ccb1b000348204c6ee4356ef5e9977f4214bdcaf40a1f104a1b914520ea6e70ce85ba8c40c6382702ebfbd30835851db429d0f1d8f2549fce5cc539104fc225e9d13f27897e3440ebb2f3ec843807e57ea6c6aac617c017cdf8fc9f0b2016ebf8e6a23645c8564e5964892acc6f731b6bfd7e9c2fbf3614b7b0a4a4ea9a3745ca1bb7cc496ef034f2f20185870b1ea330077599fb1d355fcb0b4c98ed5a6609767d873f28013c61180b263b0ca0bf55c6e52c4ce7f63c4880ed325dffbd28c38dd4c8eb7f82e0548d9fa5b01787de60d3d9e0b8aecdbfbb69b21cf9994cda40819b
bb10fdd9a77d9df090affc15b622114c33a88171509a822447874f93693d3cd0094e7ef909455f577d14a0edd49cd5a3b8c360c11b9041a1a8035bff9f10925a2f5759a62998e27e510e11ace3079cb4a2fb4a9c47baaf7af7fbe9fc987a283eb2d1b290d55ab19b7af4a02c9010ddaf701be592a7a1defea6e8f1a82d9fd8b152c7561e51c4489990993afc72acdabe4bc4acf48182efbbb6541700207b7882cc00e4a03df564c569c0c058290f012fe2cc4978f5642514655287040469aa0810c0731f6ded27f6e931029398838ab61e06a51b5c9bb44ff2ad5a94904abad2205a9b8f814d341cdd3b43b9664e4cece2b4057e12a664cb2aec4298528b5a0ec5998ab5f37f90e7ef3d4f08188737b62601b43df74ef8b2b3a7cd1b3e0e66ea88bc7b4acdf6e7251336eea1d97d2a12ae65c33bc0df452a404d7627264425119d4b4aa202931bff272e8614b2f87e0861ee612844c8a9949c1f424fda25e9f19c9f1b42fdc8ccf35f25405ed3df83bdea27b67ae40e3a28710bb72ff7dddbf8af746305de1586146c9bd1882870b3967b47d1752f9ce0d9dfb183a86c4032944b8c8be51c836deb8ebf899c1ed4a44873a5f239400b5229ff7a10827e12e041576b4b7eb9e1171900e98a8ee9831071b2d6d833373be52495f452197e90692ded63cffb6ab5ff3e7abe1dceb0fb47386e4f1ad2a439fdd8c0c7d67bff306a04b92b22f6b205986550b2f7603b8393cfa8c0b7c015bf8bdd80dc97b5118f04bff54f03f19f105a1eeebf478d7005f493366fb2b8924de41a8313f416646652f2fe2ee7a38f4d7eebda58d55488f91e05c94b18095d6b4538f9c17b615434943a185fc808a6e1b839439935f971eb721c991d106cdd7fec147aafb8a99c10b08c0b6921bfbf4466b73c12f98bcccc8194bb10357f9520b62e17c89b7198bbcae3edd5edfb0e121a32e8f15d66b008f4d48f9528d29fcbfe65e678bb22164713571a2635c893f1a7cd1ea0a7a1935bc2505c3cfef22097dfbe01f192b82dc134623809f529f87fd576bdcff1c4c5584fb5f19958507d5970b69291bf3d49287d143853f057c708fe67998020ff8a01604cd003a4185ad819fdb7d8b7fccae68ca3901be025ad3cb5740f836f30399c4623c69a6b5f8c535f4ccc4854d8d72bd814eed2ff3305c66ac155c6cd5a65b05334a704acfa4af419a9d71cf028e0e208e39f54010c89ee53357648c994e88ceabb3ccd9dc0579f936ce011b57308d8d5fbd91567de62c4a4004b68509127c6225ef2cfa4e19e056ea605b3391fd0f1de1edf05b23713b120dd5d9edc4e6a17c32cd24ffd31c3b7a6b7512b40378c99f99173abd8269b6cbe5d8a26d68f029a04fd7fd0a01ad6ce1a19d8d5cf9e7f6e5372c4c041dd07a6ab4c59d37abb28e157b0a90a7f24c6d859b42740785e04f7ec9a417d7e74d4eb44ff751c2edd12ee70f1b92b8d30631a31fee1aa039e126b42b7b81b815df972f11ae74266a080da573e7a684f186093614a69619ceec1a3c933869595d5158b91d90bea1604d31f63466408a4a36c029e2b892c41cd8bcfb40c5fc9fa5fffe3596a5ccf7d2cac73b1b1a4561416cc15cbd4110dfedb23ed4bdf0a3fbe3cd1b1be812b289e2679fc6b0c6b8ccdfa2b47de27133f7fb4f879b9208cda73332f1df049feda08c119eef7a86af80f10a259e9199ed9003182fe52dc8c9b57a4137cf0e9508625b123ffb693ae8417b5a58780390d42d0729fb2261066a090605e3c85572bd2609c6b01030f82f940d7fdbed5c314f5441276f2c4458596058d36e10a41a585a72866f9cb7c306a4f1e40d3502b77303e442525458a18eff321220be7509e884c97f9d4bc9ff4b60da1f5d930f30c79fa9462b43c90486f924797350a1e1a5b2562471058bbcddfe6132f8641236bca285f2c51339c9a54606cac6b6a38aca92bccceeb1c851004e2553819e0114313cc4813381741cd857bc253818adb58813e37776a11117a769f4a9030fd8ac980007551511159df6ac0d2c0950834eee199e4654cad602539d5bd2749c7c866ef02e8a421d296fc6b0498fef68e6061686ee9be6cb98ea92a5cae43a6e8d1fa05fa2d89f8fa8fa81662797a4988cca25dd4f739c21019df0b822f65cde38a0a65f1318e491fe215353153881d9b3262f143af2a391f979583c8c945dc985b6de5bc34c1608a3f270890eb8aff31da524ebf40de2a22c55ab90acaef3c24c0256e27cb064d46815f36941548a1258ac786352275ba65e69ac794581ff1ea325f93b92a5934ab9a384c54bbfe7485b74099a83a4d9d43edc82784c387dd694ce3b0473e3710a3d4a19162dbc72a34c853a9b44c226a7691aa1c9ddb56b4ccf43cf570ffda419d98a7104bb4d6210a0875e084d494b4987562840e2dd55fc091602fae24b41a580d2a1f99d4a081b42398e41cae5c55d376ec1181d1aecacba06e5f4392d6094cc4ce68662d5f012814f9982b732af505661c7
5108ec159719b0b2112cd70f2e39df3533fb14d5516e26513270a2085f28adb2446d4ac72373a0c6223c5ad7c15ac32ec27db4636c6e9e29297df3378f339c43ebe35720b383e1d23c05ff2053b8966eb19680775935092860a92cc095cc97b9f0d91d31d653a7f0c99ab153affeb7ca055ca3a2a3290b707bb202425b0db04886a948e4ea0fde77edecd5f93275bdcbb49a7cf6f2e08d42f474113f4b0978baf3357b5f81e40424e3e16015ffabf003c63351160d3e74f6257b71032fc6b778d237a3a184575d39d3fb126c85133c9c2f0ba983f976d3b8cfc7c10d09f69110c74d1ee8193f737642c2fe77397a0fb67d4d55238a52321ac97bbfce81bc63a6eb3f65d5d8cf294fa0e7dc53b4fe4d0dc48d123c98e5f7da50678191d5308d7c91066fa141e7dd168fbcb2cf21ceef5e0bd44ca3ad299a37a4be132670bbfd68e9b6b822f07c1eedcc021d82e079c5027b8397ffad7da9cf11afa21031f1b95e81a7e36ea009eb584b799dcecec72ffacf296af6f50c72cc3bbc3e435d1946e446a9aa7d2859f76743b7d81af65d8e39573f593bf29f24ca7bfbcf782588214e78638e3f32f5376fcfa4d8e59750fce1fc47bf8da83e6c67770b3d06645958694958326fc92337a088db0ebbbcd34b7cb4822eec0cf850e7bd88c4e69b5195b4e867898028b7b1d82cb348db3a541fa74e83cfb224ae7a0ebb14e995d0441bd99432a891f2ce21892ac71fc9a80c3d685228a18686601d219cfa2325db0"}, {0xc, 0x7, {0x0, 0x1}}, {0xc, 0x8, {0x2, 0x3}}}}]}, {0x4}, {0x1928, 0x1, [@m_tunnel_key={0xb4, 0x9, 0x0, 0x0, {{0xf}, {0x3c, 0x2, 0x0, 0x1, [@TCA_TUNNEL_KEY_ENC_DST_PORT={0x6, 0x9, 0x4e24}, @TCA_TUNNEL_KEY_ENC_IPV6_SRC={0x14, 0x5, @rand_addr=' \x01\x00'}, @TCA_TUNNEL_KEY_ENC_IPV6_DST={0x14, 0x6, @mcast1}, @TCA_TUNNEL_KEY_NO_CSUM={0x5, 0xa, 0x1}]}, {0x49, 0x6, "809f09e7aa5c2a4f88693ffae10381961fb1d16ebb68320e536ef575e92917a2cbe501ccfe46160c06b2c2330cbe6d2e4f8dad7743c2da818ec756eb23f0afdcb197985e15"}, {0xc, 0x7, {0x0, 0x1}}, {0xc, 0x8, {0x2}}}}, @m_simple={0xd4, 0x9, 0x0, 0x0, {{0xb}, {0x40, 0x2, 0x0, 0x1, [@TCA_DEF_PARMS={0x18, 0x2, {0x6, 0x588, 0x1, 0x0, 0x4}}, @TCA_DEF_DATA={0xc, 0x3, '\'*/^]\\]\x00'}, @TCA_DEF_PARMS={0x18, 0x2, {0x4, 0xe5d3, 0x1, 0x5, 0x2}}]}, {0x69, 0x6, "70d44c04a4b4b9fded90a2090a07446647db331958f89dd8fc6898152566c265005932d943215013880227d8a8400c78dc7f3b947cf2ca9fcdada9bbca3bbc6ca7548a4bdadc804dd77631bad04ab9b7921c01a1e0d830aa44f22f2c9dfd113950d0dc4ae0"}, {0xc, 0x7, {0x1}}, {0xc, 0x8, {0x3}}}}, @m_sample={0x188, 0x18, 0x0, 0x0, {{0xb}, {0x5c, 0x2, 0x0, 0x1, [@TCA_SAMPLE_TRUNC_SIZE={0x8, 0x4, 0x8}, @TCA_SAMPLE_PARMS={0x18, 0x2, {0x7, 0x3, 0x8, 0x1f, 0x7fff}}, @TCA_SAMPLE_TRUNC_SIZE={0x8, 0x4, 0x1f}, @TCA_SAMPLE_PARMS={0x18, 0x2, {0xa5, 0x0, 0x1, 0xd3, 0xffffffff}}, @TCA_SAMPLE_RATE={0x8, 0x3, 0x7ff}, @TCA_SAMPLE_TRUNC_SIZE={0x8, 0x4, 0xfffffffa}, @TCA_SAMPLE_PSAMPLE_GROUP={0x8, 0x5, 0x7}]}, {0x102, 0x6, "b4ffd523d196d18f99019fa38bca1aec6a654b81befa6ff1cb0bca72d6172b440a7763bd9f10e03f52bdba4bd4bf33e87a302342b4a92c8de405caba3444bde359c099e1b51d7933e4991d38fc24e1f44d176d4c23da8ac594dc460a3121239b054a6f3c77bba349f7bbf42b57f5bc7540ef80d24513c6d6d56939382a0af4691d417c48b14550914f1862029cc4e756265248755d9b2b1012e5aa1f4a658c1d321618bf94e0971765756c74c7366607c9e98a508bd9f401eb6b083e5c8dfcf6024c41228dbcf38a262c81a26bb46080f9e2524dde68cbb8ad83f6d00e43ebde23708d7edc2ae7d9feafbe8f23b2728fbf4eb8f9af49edd3dd25e874da62"}, {0xc}, {0xc, 0x8, {0x3, 0x1}}}}, @m_mirred={0x1b8, 0xa, 0x0, 0x0, {{0xb}, {0x124, 0x2, 0x0, 0x1, [@TCA_MIRRED_PARMS={0x20, 0x2, {{0x401, 0x9, 0x2, 0x1f, 0x8001}, 0x4}}, @TCA_MIRRED_PARMS={0x20, 0x2, {{0x20, 0xffffffff, 0x2, 0x5, 0x6}, 0x1}}, @TCA_MIRRED_PARMS={0x20, 0x2, {{0x81, 0x3ff, 0x1, 0x9fa, 0x4e}, 0x3, r7}}, @TCA_MIRRED_PARMS={0x20, 0x2, {{0x4, 0x2, 0x6, 0x401, 0xffff}, 0x2}}, @TCA_MIRRED_PARMS={0x20, 0x2, {{0x6, 0x1f, 0x1, 0x9, 0xffff8e06}, 0x2, r8}}, @TCA_MIRRED_PARMS={0x20, 0x2, {{0x87, 0x5, 
0x0, 0x1, 0xa2}, 0x3, r11}}, @TCA_MIRRED_PARMS={0x20, 0x2, {{0x9, 0x3f, 0x7, 0x80, 0x1}}}, @TCA_MIRRED_PARMS={0x20, 0x2, {{0xffffffc0, 0xffffffff, 0x10000000, 0x400, 0x1000}, 0x1}}, @TCA_MIRRED_PARMS={0x20, 0x2, {{0x2, 0xffffffff, 0x7, 0x7f, 0x80000000}, 0x3, r12}}]}, {0x6c, 0x6, "ab5da86fb2ad38ac3afa3ca2d0cd2122ee2cd97d06a53895d3c0b5b34115ddcfde0ecf58b865716c834153c8bc3c50b7543b6c0cdf8c7962c3b03b6f2240b13fabb7fb29c2076b65f86abfbea4a1559990231f5d8aa2515ffaf553794a3307e98635070792d8cb64"}, {0xc, 0x7, {0x0, 0x1}}, {0xc, 0x8, {0x3, 0x2}}}}, @m_ife={0x154, 0xa, 0x0, 0x0, {{0x8}, {0x30, 0x2, 0x0, 0x1, [@TCA_IFE_TYPE={0x6}, @TCA_IFE_TYPE={0x6}, @TCA_IFE_PARMS={0x1c, 0x1, {{0x9, 0x9, 0x20000000, 0x0, 0x400}}}]}, {0xfd, 0x6, "3b320608c3d17fbe4bb044d1d06c3fca18ad65be8a88364bbbac4ecb8418e972d8e09b9b7618d44221c4281710ae381efc8aa7d2b05e352f7d649ec32537db7b79d844225424ff3f610c493ce02e237be4335e34172da888d369f69c2fa93de8d1dd66665448475855cb69c14fd0bb565926a820d5fac76b706daff25e74e0b39b5e153783a160809ad5c42edcd88c8deb091b78e529320ed13f80fc1835d9bc0215c07394b4566382e1ce35382619011e02195e7c720286716226b955b39e61a5190cfe5a1aef22b0f2eabfdd6feaf832e716541aabffa63f97515888c96e94809565450759a673dc2e9be5142bd746d23acd37d4b11c1c09"}, {0xc, 0x7, {0x1, 0x1}}, {0xc, 0x8, {0x1, 0x2}}}}, @m_ife={0x9c, 0x1e, 0x0, 0x0, {{0x8}, {0x48, 0x2, 0x0, 0x1, [@TCA_IFE_METALST={0x34, 0x6, [@IFE_META_TCINDEX={0x4, 0x5, @void}, @IFE_META_TCINDEX={0x6, 0x5, @val=0x3}, @IFE_META_SKBMARK={0x8, 0x1, @val=0xa7d}, @IFE_META_PRIO={0x4, 0x3, @void}, @IFE_META_TCINDEX={0x4, 0x5, @void}, @IFE_META_SKBMARK={0x4, 0x1, @void}, @IFE_META_SKBMARK={0x4, 0x1, @void}, @IFE_META_TCINDEX={0x4, 0x5, @void}, @IFE_META_SKBMARK={0x8, 0x1, @val=0xffff7ff7}]}, @TCA_IFE_METALST={0x10, 0x6, [@IFE_META_SKBMARK={0x4, 0x1, @void}, @IFE_META_PRIO={0x8, 0x3, @val=0x8}]}]}, {0x2f, 0x6, "e8f0c370616d4897fcd654704142cb9e2829feb03369921f4d1e5d204aba6297eb3e0e7f3b943c1ed7653d"}, {0xc, 0x7, {0x0, 0x1}}, {0xc, 0x8, {0x0, 0x3}}}}, @m_ctinfo={0x114, 0x1, 0x0, 0x0, {{0xb}, {0x14, 0x2, 0x0, 0x1, [@TCA_CTINFO_PARMS_CPMARK_MASK={0x8, 0x7, 0xca92}, @TCA_CTINFO_PARMS_CPMARK_MASK={0x8, 0x7, 0x7dcad435}]}, {0xd7, 0x6, "890cb057c5ed5f061f6b62a0b6322ba15166e9d57d8cd2724ed9c98ec7e8eb9762068cc536dd8afef0859d5bc54fdce694a8c0bd50dc05ed803fedc3a94973e1e52b4477940480f624ac9d05387a1e4ef68e704519dcc3ab670eeb9f25a525b64a8f81ca33a460181b8ad1445e4ebeb0d5d479e63f207a14e8f59a8e805b59b12f4747dd5b2be72b7d759e613626f04d0b6c5d26d49ba7844809eb24a6597241468fd923bc185352307b186217c66594996b5f26c7060001b9bc36813e03f86bd8e344697114ab6550478df4014771afb1462e"}, {0xc, 0x7, {0x0, 0x1}}, {0xc, 0x8, {0x2, 0x1}}}}, @m_ct={0x64, 0x1f, 0x0, 0x0, {{0x7}, {0x20, 0x2, 0x0, 0x1, [@TCA_CT_NAT_PORT_MIN={0x6, 0xd, 0x4e22}, @TCA_CT_NAT_IPV6_MAX={0x14, 0xc, @remote}]}, {0x1d, 0x6, "4489a70062790508a1d241d3a84e5025263f4dcac759d844a1"}, {0xc}, {0xc, 0x8, {0x1, 0x1}}}}, @m_connmark={0x1050, 0x14, 0x0, 0x0, {{0xd}, {0x20, 0x2, 0x0, 0x1, [@TCA_CONNMARK_PARMS={0x1c, 0x1, {{0x253, 0x0, 0x8, 0x9}, 0x9}}]}, {0x1004, 0x6, 
"6ebb8648c560751d5724e86edbb0d1336a795e412c21adeda67d66863cc67f73163bec203823c9c257bac4f5882e6cfd2d288901552f4b393cf8927a43317385297629ae4c314f255c1cc57e23b89b4eb612fcbaf4ece5afcdeb3e76d233804a3ee6907ac7f689b7187066988a0054148905795099b441b0af8a157f84554a62fc1561befb837b4233b4b2901adc804dd8b77b92d9f8673c92846f33a86b96dc6f45217bf0aa9b3d38f44f351fcc281ffb13d7e272e528e8c267400efd851ba5427cf2bd9b8bfbdd25c7297b814b7089313329d1054f22e6c7e865845cf484dcfff64ca0e39f7d5b9301c95b0a67875860e0cb1afec2b891e3bfecde1d9ad1cccd1cc4e673abcf53dd4ee68756a949f5aae8a2c56840745d12f2b269ca3561007ab156b96c144d2a6345de9cad2d138adf7461c0bbdfa812cc42df6a4efb70b28c074c461aa8827bb4555aaa41a8c50df0899cf1b44c3209e1929e03811c237331fa7c6ecf9f5135f65ae03fbe44e332916db518cc5dc88629fccdac14c4bd1b3042c3adc57e7ff7005e2f1ec90bce8c22184344f0c2bf4e4c4f3f7fed5a1358b5568d90124b54d65fea1902d3ebfca9fccc4f020a4c9d22f20a9cf745f86ba59efde1c28bd6fe6c726a67a387028ebc78834f9b0cecb0221d7292f1d45a9011ceb485befd06a067dc054ce748af5719a6b5f99735026c0f0a12ee6be3d9fcb15ac5c5ded098ab56432ad4866260d6c1f955dac4e2614af921938055114c32bc602b11ecfacec87000ee05330dc00913d586917540a3fbb1ef87d527d50eff8310e09561cee5b47c5e85bb5485d0390aab3fd697fd51fbff1edd5d4543607e34c976d162522a6a8a2dca0179b3524f8eed903ca98f7c8df7b25ce11d063db7f96e7e8ded3c96201f40edf156db80c1ea43ba4418dd10b7bbf092d973503d90f8024bc68f7640e81c6a11cd633788a8ca6bb110827cf7fb21c19e60ec3654363b8f8d4616f89404b59258a226f89a0df4ab184d7b76cc87adf788e9ecf9d2658ad2d9424b1e872ee4d6cf03acce55a768c94a465eb7654232eb4b39f453de8bc65fc731ea41d140a40359480d370bf8f7c432a838c3f9806dbd8439d20c7d06183dec12754fde3f6fc14b0e02a182d564f089fc163c1cb4905866726128929d080a21277514bdff8eb2b9e3e08f5c666063dbfb94248677983c4b64d8ef579d062770264a42cca5d84d0464b3ee8c55281c6b3c3aa3c7877d7de034cbae1553ca3f4449a8bddca7f13c90f98e4a19c7976dd03f48cde4a9ed6220583f514b324ef190015ec1608c6b104a3d1a1b154a81819e3d7bdca949c9ae09144f7f5bfcdac5e71fe022cd2df17ca1de1e301986c8a845a61374c11a2db26005b6ebb6b53d376fc613f9f6c4880c27a7b3f8c85e87a3151e320eddd67a33154ff107e2ad5dbf57da59aa0b3ba21d054d0ce04f888892afc93c37dff332ae9f518e06a3b197f5a6f4330ec266f3d82428c1d59a3a7f9c8d871f6100005ce2533de35c3294a3187548fa1ddc839196a54dba117fac43be04b95d7781cd09a0a024c6b98c0ec02529b9cc2ccfddbac211d1c639c518975b013408ab8cc6c66bcd5a76ff2148b5532e5736086c37876b8214428bb8292851d1b9894cb88d967812b8ce9c499cd87803310d2e816007721d75dd108a56de8ec827feb0bd5fab496e8f807b9a4a13f853018fa546337794af83c8a2cfe9e96a0190af5cafc5df4e5dbee45ce812f5463533e2aa95e15b1aa3b6e037ada0f9f87b88a324b1b2b41ae59af7f088c060d36aa59473f6f9408c01798679afad180fcfd95651a52dfc407ef18461178a1816d9c6c2d03b95868b51c6492814985db042216a0096705d7cc11121933a250c59ded8a5a78fe4d62ce5000b38049b5eee5fd4ff5f5f0f6fdccf99a0999c460c0af0eefa2eb725d063e9ae5cd2b284d3f2ce617dc5ac3d9a3efbe3dd4340de56836e7684e7eafc87127ad0fcf9d585393f4f459352bdfc90eedee43cd428c0334a7946ce474b4740505bf2482b02aa00b473975594a51a9d52d7f66434ae917d4fb41269cb5595adafad227e0cd0c2677e348ea4562dc3b157c97b32ddf59a292eb4aa4d8c04a0027047eb09fe247f7fc5857e00c75382447b72e3082b6afc928af9d94903b11dc3fe5800b60d3a0a42ff10e5823ebdc0a84aa050f8e2768972ff2fb0fb247e97aa9e1f9d229de7e0335dbcec3a84eb6947e79007b2ffde51cfcc76561b61c41e7c85237509e9f1c2c8227711e6827d0a18ec7cd607eee5615ab5c8b87f0b9a022c742adc7af02224d8bbd42c25da21ead470b577b5ff83497979e702703f2ba015bf66bcb9787dc07ae30a8ed785e747deda4f5d2c2f8d799daf884a6270d64e92353b99f5bc806b7f3703d6bd4f1455e8e07422abd439c3cf296e3fc325715fdcae249d302a4d45ed7e83bdbef6911c017b0a24229f55861595d56b97108eb42f336c0b06ea061decfa
e73ef606199b1914291588539bfcf783f693ac82285bf562f050f49381498a5bb1f88907635c66318ed87c50574e029601a08ac972345458cd69f2c8cb60f4748fde0dabea5ee5085d03f90aafdd2768919f96d0654684eb9d703f5fa0d84a4b54197acc137939621f1b6592aa529532c70c84dc7cbe9619650250b69bfd702a8e1f0a25399743aa66ae3d917b3a3bb28197fd616fa8752537ffa0c997e1fabc9b520206af891cd7d0884ff49d8a1959bbb0356769bd74c4db5b9530caeb67fc8487a9c27f452b62cfdc3292028d766f26fe057575e5b9d18fd0f45d580cb69e8aeb0470620b9ed89202e17108a4c85cf408a4a9f2192e7b4b061df7248fc665da69be540d1ea30d18a8df2fab3a676640b685414d5aeb750671418bfd9bb06b1a4c27529cc5aecc595485a225fe983f6e1ac4df9c110b13530de54d12becc3725a5f7b78d238c1ed3ff46e862f58ea81edd6a114beccf2df466021d8a2fa2ace79166ea52aac2f7cfa59ca95c336243249e1d2be9d6789163d629a09cec52a618494a419c240837695ee5c7400efda0da8a4488bce4356247884c2572ae98f79fbc1be8cab68f062deaff7f1b66ad7012b6b3c7252c0e404cda7141b666ac906aa233b27181aaf09be2497b9124a030b5039343c4abba563e4acc54b7db484b9c4bd8266049566e4844cf78a7d1859da0fb2b301178cb05954b7a5f318387d3eacc8d8cf0d1f47ed97735aa71c13e4eaf3cda94a39f54dea60d65609b2a716b3cfe059a97363b5f97b2467a4fcdc41eaade7198b17973d26a897fd3e7127c1bfd2c1155fb9e8c163875ac3ad32854a2a2bcb6856cbc57f829dae94d054fef75e615e484ac6500f8437cb99d1a81b2984093138e45480cb347c3391df2e78f143baea580874f07076adf2680ee9f835346a212bc75f17dd5a05c26b02734838d7ee7352ee6a2ec93664093fb0299172996358d812f4ea22c776c499f5765b202898c83c6415e8f2e5cb26d1c934c99749dee7eaf70089a0ff3ec99e578024f2f50bf28ccdb075520057bdc478ff28dcae29aad47cfd66f8e6de90e6659a18d21f0a301dbb92d4f9c8de54018ea4adc43832033f7496596cfd96ea70096d9bec2d658ff59dacf0124bad9a067ed129a986942747a805007ca308c109e86056b4f0aaf4ed4a4505206d54cfb8777af6f7124a2d6046f291ad71c556dd1ca9a8d2e0c6011c3a30fc96fa1090994876db5302c6c5732de98da066a8407b738caedcde567e5b054f3146508cd655e1c8a27fd2fff8d429193635a144f2c393fbbd6a8f83d5a9fe0020f50d5e74489a6b5dda26be9f9811bb5d0e0f9685e1d2ba446febbfd20e02bf24571d21316a6d3baa8d7a1a7b76d9cb9f0f20485530908e5f2b1bee7301bd4793f046ed8b3daf376a229ba9448e5b1106a9da951a04ef628e92c87fe8377fe5edc9d786c6829cd0712a0d7a36a2707c76e9c76ff26f77e96db47e5b6d2a579f45531b223ea3924370974c5699cc3059425406e872e5743365d37e34269d4c4d6be500fff04b7700ca30b4a35df5f4d7055e7524fac2c0bff835a3db22ec7d563a7f34f5200de9308e93d6be6f5b463f8ee6f7d066aa5bff9d9f22f3106ee6819f772b8b0ff5684fde997ab5700972ac7c67fef87a04d7199a6242a405588e11ea99cd0c18a0061945e7964afc2c65c37fa392abf344ad9952bc3b74f6ea5e71bae135db305789fde3d8692cf8c652838a58d340c31088d62cee48546273ebca3c05bd12a0642a98c383953d1a4a60d514ee0609589db0f76032111f0c32e889da4d0dd3c4da3e70d528062147fb3c5a08b530ac20cc08e7f0a9b2c5b69ab8696f5ca4fe31bba43170b7835646543ae1304491e4b08633c73675a33ee3677cb5bb42bbb4acfbd49d0003fd1908938aa34652b391d665e59fe2d93af4c10998d9d0ce3cc92679677e842067a71aa782c6b338162f75297b751cabf956a40cf83651ca9481d6e57b75a914234a3ac873e70e427fec94e865cf04cf347fa2ca5e87cce9bcbeb0f18fde00acac70044c5b2fbf6c5f23d92549beec26ada757266d625a69b423453bcb78ec6572f0e824d3ec1221898656b3e428cab22ed7880e0583d9ead5a9a6a4d92b460fee41389b9d220fed4aafd6346fd7f343176ae7084a81da224c7a41f2f91ec186e0b0a71dee4587e9e383b5a78472e0c0e7e82f4aba50b125e672068df7b3757ab43e4c805657689f94fa2ed24a036c8c95f66803abcee187222d126fbd9de9d21ee7bd4e78835f85e7c5c99c5e633fd883d5408889687d2dd98a24634656b9e05361a731860fc3983e3f389e52d522f28bc4d17236c829de5a44c3077cec1c69029322e4564a9a58a307674b18e149175849d78715efdc0f3270280bf9200fc9729275a79153e6420ea77181a17803b487df5c71a2959ef4518f21c255d842b2e50cf2ad55ec49523a2acda13b59181f58936fdea673633dbc897bcef3b2c245779e
e10aaa81ebc8d34fbe7dbe638d98a3e3a9c6ad87bbccf9535fec5da5bf899e03975cdbba357cde3fbeef8c5bcafd0be79b059fa936cdb513eb3fa5785b49edd0ae81ce44a99466a9829f542491cdcf26b2385a925d15a2455e597b0d96f1c9a040c8e8c0bce7808abac3572dd00ae11b2aa5c852c06d59bcaff80d239b4609cbd07ce2921dbdd9fa462d4b46f07cb60a0c5f9af56c68967feaecdfd38ca658d2290c53671ccbb31acaf6deb351bf97f60aaf9a019d461d5ff34be828f5462060ed523a6c329fa108d60dabdc093abe33ac0e816212130b3520e8f035a86b5b9276ef4865cdb513d6b8ad4772744c199919fae3b7b6ede401004916d47d3b59936be3c5b4b5e32a4171d18e088206c52a593f82676ba88784313e5bcf103a08b83004e66ce41ae0d288a99b49cc6d30f539f4bf2623e990237b8a6878c14dfdf40c7ee1a1cab0440bd7cf68703a7a2165dea415982f7f3e1d4e6bbaaa24800f07b24f5a5a8fa7c58889be316e50be6ca0b88b5ab2f2f951d27db3e218b4b0bb883918fb503ca3e11f54347aedf4732e29069acd93b8042fd0784dd0c0b1a4aaf28a62b703850162b55234f03c5022acb0e5d4cb34fe3ee8d57a8894b88570a755348332f997751bd6ec51bf27f1792b70e976caa78a0c19c56c2c6465bc5cac5575220e450a504243d383be232824302d14adb7623427f9e6ee258953db86ef4c7d60357e0059e55b5c293a6febc585212a71c391b3e853425d6b018dc20"}, {0xc, 0x7, {0x0, 0x1}}, {0xc, 0x8, {0x1, 0x3}}}}, @m_csum={0xa4, 0x7, 0x0, 0x0, {{0x9}, {0x20, 0x2, 0x0, 0x1, [@TCA_CSUM_PARMS={0x1c, 0x1, {{0x2, 0x1ff, 0x0, 0xffffffff, 0xfa}, 0x4a}}]}, {0x5c, 0x6, "02fc12910c1fa1fbd15b0a8568dc8b2221df1c7cc4f09acc637d253ccaad4df77282eadb26fdc72728e80db5d2ae50504293746c11ecd32873e29c2ed89ce95882c7eb9a3a8d1a664c3769bd1fb0ce5f356e121bbdef9855"}, {0xc}, {0xc, 0x8, {0x2, 0x3}}}}]}]}, 0x6dac}, 0x1, 0x0, 0x0, 0x40850}, 0x0) [ 2178.020597][ T7547] 8021q: adding VLAN 0 to HW filter on device bond1438 [ 2178.081399][ T7553] 8021q: adding VLAN 0 to HW filter on device bond845 10:41:54 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) pipe(&(0x7f00000001c0)={0xffffffffffffffff}) close(r1) (async) bpf$BPF_LINK_CREATE(0x1c, &(0x7f0000000000)={r0, r1, 0x1e}, 0x10) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) pipe(&(0x7f0000000140)={0xffffffffffffffff}) close(r2) (async) ioctl$FS_IOC_SETFSLABEL(r2, 0x41009432, &(0x7f00000000c0)="3c980c66d8a95c96bc8bb505b90034b3c7ae3f64c91342831cba04ff39d809620fa7413591bcfb26c0048fa0ef888107ca144527f2928b0bd26d003cedb1e2c94ee8811280b2ba464ad23d5e525d10b95a21e80e42dd5bf93ec3e9691663860c1d6d7d8a804a49af730b537c710ac5a7a4d97b050eaf515f4375d98e97d1bdedded754f513e2dd8a2b2de2105fb325e94143aa010f90548ae874cf8a5e2baaf44d8b00b2c0c2a51d37d84f9586a9173c7e4051e84a6d03472268ce86965a567077b83b4c630d87466b9ef9de7cc694735104a2e46710a84def90aa2ac18e32aa857afbd99a89b577c6bae2215e1f660ac013f681faba99cd4d2b66658f4eec52") [ 2178.249784][ T7538] bond845: (slave bridge1002): making interface the new active one 10:41:54 executing program 4: r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000)='./cgroup.cpu/syz0\x00', 0x200002, 0x0) r1 = openat$cgroup_ro(r0, &(0x7f00000000c0)='blkio.bfq.io_merged\x00', 0x275a, 0x0) r2 = syz_init_net_socket$rose(0xb, 0x5, 0x0) ioctl$AUTOFS_IOC_ASKUMOUNT(r2, 0x80049370, &(0x7f0000000040)) ioctl$EXT4_IOC_GROUP_EXTEND(r1, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) recvmsg$qrtr(r1, &(0x7f00000008c0)={&(0x7f0000000080), 0xc, &(0x7f0000000340)=[{&(0x7f0000000100)=""/115, 0x73}, {&(0x7f0000000180)=""/174, 0xae}, {&(0x7f0000000240)=""/237, 0xed}], 0x3, 
&(0x7f0000000900)=ANY=[@ANYBLOB="980000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000000000000000000000900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000780000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000060167d983062030df467517df9bac443544b4b16809497d760c17f07c89179fce27249413d59502c6bdd13dd5307ea2f59c6395e49d4db983b26c0940a22c75f3c98082e7725f516f3dd9852224689329ea8e57a39160b488c63055dd19a7576ffd6d874a233da176275a77141058bbaf2583ccfd2f14b385eef0adac7ef4b55d88a4fc8e515aaa3905da687a335a2a36a78a136637057109bb3ef16c71e9ba1b4ac54aaba11d42fe8f33d26b488f7"], 0x508, 0x80}, 0x38, 0x2000) openat$cgroup_ro(r1, &(0x7f0000000380)='blkio.bfq.idle_time\x00', 0x0, 0x0) [ 2178.317918][ T7538] bond845: (slave bridge1002): Enslaving as an active interface with an up link 10:41:54 executing program 5: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) writev(r0, 
&(0x7f0000000340)=[{&(0x7f0000000280)="043caa07f3a3f76d858ace87d769ded7550278376e8c4f6550d9108cbf5189deea33e10b8a7224c6107d53d01f904f58c8e83e60c4192c5a7fbd1a5f9c9fb63ca8dddee8d195355273926702d073752d80d8545e667b5c485a", 0x59}, {&(0x7f0000000100)="e48c29435b087869336f3e74363df6ea882508c8aa487ff564e800ccd724f50e3896c5d551b1a2ec6bb29187299fe2e06c11dfcb2075f9a839b34a4a6b4c2bec", 0x40}, {&(0x7f0000000400)="a318ece45becd6e47ff8375b816640bce6a9ff4d8e83ab791404d351c3ba957f2f3a0220176c17e899248cf03c8a908c4ade8de15d81e79b803df97f6fe5e817c2efde3befe034a7bc6d52a0c1caf88dc7d2077e40879876ce4aa8d6273615939a97526b8cb409d0c58f8075db16b3aed80bd44efb660be17d9db07a801a43b59a034e9998a5d60b072ab0fe5dd97a86e25568ab6d9d74c7af31171f38eaff0cb0fc24300b48212ec6608ef820fc964a28db4cd8d0526d550efac04bc60f4e4cd3c224cbe0ba45f28e0566930afbca59424e39854793c6e301fdc9cedeb00500db4af5f6c2004568aa4682fc656b1943bb63", 0xf2}, {&(0x7f0000000500)="a1a8c7128cf53b860b5aaa1ccee4ffe846420e0192cfed6fed4eeddf537179b0a83627c3868f3b78b60a61263a90957f69337b2ab0a123218ed0318185135c095b602df299adfc458eef4947ab34782ecdbae8436486efca9d9076f2316af18200a3014efba92706cc1748f7c5925bdaa3aa4516b6f1a6f22f4f1c23d2554620b90ad856298d3087e4519d78d14dd2d1dbb2401cfd330b19ac2fc26b2b4837023a585b846cdc0e007773719a6ad0cf3acb3cde81220e1909741522d75d5c49222f1d0ef6d8bd1b58070df04e658553cb734adbdb98c8482330ed25a1d53b5b3a6c09aa436715f3535c84a943", 0xec}], 0x4) accept(0xffffffffffffffff, 0x0, &(0x7f0000000080)) socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x0, 0x803, 0x0) syz_genetlink_get_family_id$mptcp(&(0x7f0000000000), r1) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r3, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r5, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0xca020000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x3c}}, 0x0) 10:41:54 executing program 2: r0 = socket$inet6_sctp(0xa, 0x5, 0x84) getsockopt$inet_sctp6_SCTP_SOCKOPT_CONNECTX3(r0, 0x84, 0x6f, &(0x7f0000000000)={0x0, 0x1c, &(0x7f0000000080)=[@in6={0xa, 0x0, 0x0, @rand_addr=' \x01\x00'}]}, &(0x7f0000000180)=0x10) (async) r1 = socket$inet_sctp(0x2, 0x1, 0x84) getsockopt$inet_sctp_SCTP_MAX_BURST(r1, 0x84, 0xd, &(0x7f0000000000)=@assoc_value={0x0}, &(0x7f0000000240)=0x8) getsockopt$inet_sctp6_SCTP_PEER_ADDR_PARAMS(r0, 0x84, 0x9, &(0x7f00000000c0)={r2, @in6={{0xa, 0x4e24, 0x9, @private0, 0x2}}, 0x0, 0xad34, 0x0, 0x0, 0x0, 0x0, 0x1f}, &(0x7f00000001c0)=0x9c) (async, rerun: 64) setsockopt$inet_sctp6_SCTP_PEER_ADDR_PARAMS(0xffffffffffffffff, 0x84, 0x9, &(0x7f0000000080)={r2, @in6={{0xa, 0x4e21, 0x1, @loopback, 0xc000000}}, 0xff, 0x4, 0x3, 0x2008, 0x5, 0x80000}, 0x9c) (async, rerun: 64) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async, rerun: 32) r4 = bpf$BPF_BTF_LOAD(0x12, &(0x7f0000000200)={&(0x7f0000000280)={{0xeb9f, 0x1, 0x0, 0x18, 0x0, 0x9f, 
0x9f, 0x8, [@array={0x0, 0x0, 0x0, 0x3, 0x0, {0x4, 0x3, 0x3}}, @enum={0x7, 0x1, 0x0, 0x6, 0x4, [{0xb, 0x800}]}, @func_proto={0x0, 0x2, 0x0, 0xd, 0x0, [{0xf}, {0xd}]}, @volatile={0xe, 0x0, 0x0, 0x9, 0x3}, @datasec={0xc, 0x1, 0x0, 0xf, 0x3, [{0x5, 0x8, 0xc2e}], "7bf642"}, @struct={0xf, 0x2, 0x0, 0x4, 0x1, 0x6, [{0x1, 0x4, 0xffffffff}, {0x9, 0x2, 0xfd}]}, @typedef={0x7, 0x0, 0x0, 0x8, 0x1}]}, {0x0, [0x30, 0x71, 0x5f, 0x2e, 0x5f, 0x2e]}}, &(0x7f0000000340)=""/196, 0xc0, 0xc4}, 0x20) (rerun: 32) ioctl$FS_IOC_GETVERSION(r1, 0x80087601, &(0x7f0000000480)) (async) ioctl$FS_IOC_RESVSP(r4, 0x40305828, &(0x7f0000000440)={0x0, 0x3, 0x8, 0x3}) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r3, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 10:41:54 executing program 4: r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000)='./cgroup.cpu/syz0\x00', 0x200002, 0x0) r1 = openat$cgroup_ro(r0, &(0x7f00000000c0)='blkio.bfq.io_merged\x00', 0x275a, 0x0) (async) r2 = syz_init_net_socket$rose(0xb, 0x5, 0x0) ioctl$AUTOFS_IOC_ASKUMOUNT(r2, 0x80049370, &(0x7f0000000040)) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r1, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) recvmsg$qrtr(r1, &(0x7f00000008c0)={&(0x7f0000000080), 0xc, &(0x7f0000000340)=[{&(0x7f0000000100)=""/115, 0x73}, {&(0x7f0000000180)=""/174, 0xae}, {&(0x7f0000000240)=""/237, 0xed}], 0x3, &(0x7f0000000900)=ANY=[@ANYBLOB="980000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000000000000000000000900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000780000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000060167d983062030df467517df9bac443544b4b16809497d760c17f07c89179fce27249413d59502c6bdd13dd5307ea2f59c6395e49d4db983b26c0940a22c75f3c98082e7725f516f3dd9852224689329ea8e57a39160b488c63055dd19a7576ffd6d874a233da176275a77141058bbaf2583ccfd2f14b385eef0adac7ef4b55d88a4fc8e515aaa3905da687a335a2a36a78a136637057109bb3ef16c71e9ba1b4ac54aaba11d42fe8f33d26b488f7"], 0x508, 0x80}, 0x38, 0x2000) (async) openat$cgroup_ro(r1, &(0x7f0000000380)='blkio.bfq.idle_time\x00', 0x0, 0x0) 10:41:54 executing program 2: r0 = socket$inet6_sctp(0xa, 0x5, 0x84) getsockopt$inet_sctp6_SCTP_SOCKOPT_CONNECTX3(r0, 0x84, 0x6f, &(0x7f0000000000)={0x0, 0x1c, &(0x7f0000000080)=[@in6={0xa, 0x0, 0x0, @rand_addr=' \x01\x00'}]}, &(0x7f0000000180)=0x10) (async) r1 = socket$inet_sctp(0x2, 0x1, 0x84) getsockopt$inet_sctp_SCTP_MAX_BURST(r1, 0x84, 0xd, &(0x7f0000000000)=@assoc_value={0x0}, &(0x7f0000000240)=0x8) getsockopt$inet_sctp6_SCTP_PEER_ADDR_PARAMS(r0, 0x84, 0x9, &(0x7f00000000c0)={r2, @in6={{0xa, 0x4e24, 0x9, @private0, 0x2}}, 0x0, 0xad34, 0x0, 0x0, 0x0, 0x0, 0x1f}, &(0x7f00000001c0)=0x9c) (async) setsockopt$inet_sctp6_SCTP_PEER_ADDR_PARAMS(0xffffffffffffffff, 0x84, 0x9, &(0x7f0000000080)={r2, @in6={{0xa, 0x4e21, 0x1, @loopback, 0xc000000}}, 0xff, 0x4, 0x3, 0x2008, 0x5, 0x80000}, 0x9c) (async) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) r4 = bpf$BPF_BTF_LOAD(0x12, &(0x7f0000000200)={&(0x7f0000000280)={{0xeb9f, 0x1, 0x0, 0x18, 0x0, 0x9f, 0x9f, 0x8, [@array={0x0, 0x0, 0x0, 0x3, 0x0, {0x4, 0x3, 0x3}}, @enum={0x7, 0x1, 0x0, 0x6, 0x4, [{0xb, 0x800}]}, @func_proto={0x0, 0x2, 0x0, 0xd, 0x0, [{0xf}, {0xd}]}, @volatile={0xe, 0x0, 0x0, 0x9, 0x3}, @datasec={0xc, 0x1, 0x0, 0xf, 0x3, [{0x5, 0x8, 0xc2e}], "7bf642"}, @struct={0xf, 0x2, 0x0, 0x4, 0x1, 0x6, [{0x1, 0x4, 0xffffffff}, {0x9, 0x2, 0xfd}]}, @typedef={0x7, 0x0, 0x0, 0x8, 0x1}]}, {0x0, [0x30, 0x71, 0x5f, 0x2e, 0x5f, 0x2e]}}, &(0x7f0000000340)=""/196, 0xc0, 0xc4}, 0x20) (async) ioctl$FS_IOC_GETVERSION(r1, 0x80087601, &(0x7f0000000480)) ioctl$FS_IOC_RESVSP(r4, 0x40305828, &(0x7f0000000440)={0x0, 0x3, 0x8, 0x3}) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r3, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2178.691625][ T7554] bond1438: (slave bridge1337): making interface the new active one [ 2178.748485][ T7554] bond1438: (slave bridge1337): Enslaving as an active interface with an up link 10:41:54 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xfeff0000}, [@IFLA_LINKINFO={0x14, 
0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:41:54 executing program 4: r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000)='./cgroup.cpu/syz0\x00', 0x200002, 0x0) r1 = openat$cgroup_ro(r0, &(0x7f00000000c0)='blkio.bfq.io_merged\x00', 0x275a, 0x0) (async) r2 = syz_init_net_socket$rose(0xb, 0x5, 0x0) ioctl$AUTOFS_IOC_ASKUMOUNT(r2, 0x80049370, &(0x7f0000000040)) ioctl$EXT4_IOC_GROUP_EXTEND(r1, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) recvmsg$qrtr(r1, &(0x7f00000008c0)={&(0x7f0000000080), 0xc, &(0x7f0000000340)=[{&(0x7f0000000100)=""/115, 0x73}, {&(0x7f0000000180)=""/174, 0xae}, {&(0x7f0000000240)=""/237, 0xed}], 0x3, &(0x7f0000000900)=ANY=[@ANYBLOB="980000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000000000000000000000900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000780000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000060167d983062030df467517df9bac443544b4b16809497d760c17f07c89179fce27249413d59502c6bdd13dd5307ea2f59c6395e49d4db983b26c0940a22c75f3c98082e7725f516f3dd9852224689329ea8e57a39160b488c63055dd19a7576ffd6d874a233da176275a77141058bbaf2583ccfd2f14b385eef0adac7ef4b55d88a4
fc8e515aaa3905da687a335a2a36a78a136637057109bb3ef16c71e9ba1b4ac54aaba11d42fe8f33d26b488f7"], 0x508, 0x80}, 0x38, 0x2000) (async) openat$cgroup_ro(r1, &(0x7f0000000380)='blkio.bfq.idle_time\x00', 0x0, 0x0) 10:41:54 executing program 2: openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) socket$qrtr(0x2a, 0x2, 0x0) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r1 = openat$cgroup_ro(r0, &(0x7f0000000000)='blkio.throttle.io_service_bytes_recursive\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r1, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) ioctl$FS_IOC_RESVSP(r1, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e11cd1bf40efe7ac4d45bbf501328aa18d188ff2a76425b9fa853396b2316e9787d8c6247e9fe04ecae25ee1d48943d42784b8f3", @ANYRES32=r1, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) write$tun(r1, 0x0, 0x0) r2 = openat$cgroup_int(r1, &(0x7f0000000080)='blkio.throttle.write_iops_device\x00', 0x2, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r2, 0x40086607, &(0x7f00000000c0)=0x40) 10:41:55 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xd80b0000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:41:55 executing program 2: openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) socket$qrtr(0x2a, 0x2, 0x0) (async) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r1 = openat$cgroup_ro(r0, &(0x7f0000000000)='blkio.throttle.io_service_bytes_recursive\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r1, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) ioctl$FS_IOC_RESVSP(r1, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e11cd1bf40efe7ac4d45bbf501328aa18d188ff2a76425b9fa853396b2316e9787d8c6247e9fe04ecae25ee1d48943d42784b8f3", @ANYRES32=r1, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) write$tun(r1, 0x0, 0x0) (async, rerun: 64) r2 = openat$cgroup_int(r1, 
&(0x7f0000000080)='blkio.throttle.write_iops_device\x00', 0x2, 0x0) (rerun: 64) ioctl$EXT4_IOC_GROUP_EXTEND(r2, 0x40086607, &(0x7f00000000c0)=0x40) 10:41:55 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) write$binfmt_script(r0, &(0x7f0000000080)={'#! ', './file0', [{0x20, 'memory.events\x00'}, {0x20, '$--!\''}], 0xa, "a08be2b67d38ea0ac3318f81a3b1bfebd323bfda9663f5bcd09076eddc1112d0c4e303a0625f51780bd71ba3d28631a64454437e00eec4478c24350e037d2dbcf9b448accc9d75a2bb822d83f03e081799a1bfad692f0de4ae73e277bd731e769259a5080575a603829466f18b412efc6f62ade696a6b70efaab5c9b2837c17bf5ab7a836f9e0b8de4ef825774045f5c84f23bbfd1ceb9467a52802b97b9e5119533830f882057f11ad9a8d7870a7a21ee26f41b82d22ae69a9c411a743b3e62cb7175f4f58e"}, 0xe6) r1 = bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f0000000240)={0x11, 0x3, &(0x7f0000000180)=ANY=[@ANYBLOB="1800000000000004000000000000000095"], &(0x7f00000000c0)='syzkaller\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x0, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) r2 = bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f0000000200)={&(0x7f00000004c0)='contention_begin\x00', r1}, 0x10) r3 = socket$inet6_icmp_raw(0xa, 0x3, 0x3a) r4 = bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f0000000300)={0x18, 0x5, &(0x7f0000000080)=@framed={{0x18, 0x0, 0x0, 0x0, 0x3000}, [@alu={0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0xffffffffffffffff}, @jmp={0x5, 0x0, 0x2, 0x0, 0x0, 0xfffffffffffffffe}]}, &(0x7f0000000000)='syzkaller\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x0, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) r5 = socket$inet6_udp(0xa, 0x2, 0x0) ioctl$BTRFS_IOC_START_SYNC(r5, 0x80089418, &(0x7f00000001c0)=0x0) ioctl$BTRFS_IOC_SNAP_CREATE_V2(r4, 0x50009417, &(0x7f0000000800)={{}, r6, 0x0, @inherit={0x48, &(0x7f00000000c0)=ANY=[]}, @subvolid=0x1f}) ioctl$BTRFS_IOC_SNAP_CREATE_V2(r2, 0x50009417, &(0x7f0000000500)={{r3}, r6, 0x8, @inherit={0x58, &(0x7f0000000000)={0x0, 0x2, 0xa110, 0x4, {0x0, 0x1f, 0x6, 0xaac, 0xfff}, [0x9, 0x26fc]}}, @devid}) r7 = openat$tun(0xffffffffffffff9c, &(0x7f0000000080), 0x2, 0x0) r8 = socket$nl_route(0x10, 0x3, 0x0) r9 = socket$inet6_sctp(0xa, 0x5, 0x84) sendmmsg$inet6(r9, &(0x7f0000005900)=[{{&(0x7f0000000180)={0xa, 0x0, 0x0, @private1}, 0x1c, &(0x7f0000001680)=[{&(0x7f00000001c0)="1a", 0x1}], 0x1}}, {{&(0x7f0000002c80)={0xa, 0x0, 0x0, @ipv4={'\x00', '\xff\xff', @private=0xa010101}}, 0x1c, &(0x7f0000004180)=[{&(0x7f0000002d00)="92", 0x1}], 0x1}}], 0x2, 0x4000040) ioctl$BTRFS_IOC_BALANCE_PROGRESS(0xffffffffffffffff, 0x84009422, &(0x7f0000001740)={0x0, 0x0, {0x0, @usage, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, @struct}, {0x0, @usage, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, @struct}, {0x0, @struct}}) ioctl$BTRFS_IOC_SCRUB_PROGRESS(r9, 0xc400941d, &(0x7f00000007c0)={r10, 0x6, 0x6}) ioctl$BTRFS_IOC_SCRUB(r8, 0xc400941b, &(0x7f0000000940)={r10, 0x3f, 0x1, 0x1}) ioctl$BTRFS_IOC_DEV_INFO(r7, 0xd000941e, &(0x7f00000004c0)={r10, "57149989cf1136de6b93f2f3e5ead599"}) ioctl$BTRFS_IOC_SNAP_DESTROY_V2(r0, 0x5000943f, &(0x7f0000000200)={{r0}, r6, 0x0, @inherit={0x68, &(0x7f0000000180)={0x0, 0x4, 0x0, 0xfffffffffffffffb, {0x10, 0x0, 0x100, 0x8, 0x3ff}, [0x40, 0x0, 0x80000001, 0x9]}}, @devid=r10}) ioctl$BTRFS_IOC_SCRUB(r1, 0xc400941b, &(0x7f0000001d80)={r11, 0x993b, 0xfffffffffffffffb}) ioctl$BTRFS_IOC_SCRUB_PROGRESS(r5, 0xc400941d, &(0x7f0000002180)={r12, 0xfffffffffffffff7}) 10:41:55 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 
0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) (async) mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) (async) r0 = openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) r1 = accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r2 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r2, 0x0, 0x0) (async) sendmsg$nl_route(r2, 0x0, 0x0) openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) (async) r3 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r3, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) (async) r4 = openat$cgroup_ro(r3, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r4, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) ioctl$FS_IOC_RESVSP(r4, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r4, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) write$tun(r4, 0x0, 0x0) r5 = socket$nl_route(0x10, 0x3, 0x0) socket(0x1, 0x803, 0x0) (async) r6 = socket(0x1, 0x803, 0x0) getsockname$packet(r6, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r5, &(0x7f0000000280)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000180)=@newlink={0x3c, 0x10, 0x403, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @vlan={{0x9}, {0x4}}}, @IFLA_MASTER={0x8, 0x4, r7}]}, 0x3c}}, 0x0) ioctl$ifreq_SIOCGIFINDEX_batadv_hard(r0, 0x8933, &(0x7f0000000040)={'batadv_slave_0\x00'}) (async) ioctl$ifreq_SIOCGIFINDEX_batadv_hard(r0, 0x8933, &(0x7f0000000040)={'batadv_slave_0\x00', 0x0}) r9 = socket$nl_route(0x10, 0x3, 0x0) r10 = socket(0x1, 0x803, 0x0) getsockname$packet(r10, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r9, &(0x7f0000000280)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000180)=@newlink={0x3c, 0x10, 0x403, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @vlan={{0x9}, {0x4}}}, @IFLA_MASTER={0x8, 0x4, r11}]}, 0x3c}}, 0x0) (async) sendmsg$nl_route(r9, &(0x7f0000000280)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000180)=@newlink={0x3c, 0x10, 0x403, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @vlan={{0x9}, {0x4}}}, @IFLA_MASTER={0x8, 0x4, r11}]}, 0x3c}}, 0x0) ioctl$sock_ipv4_tunnel_SIOCADDTUNNEL(r1, 0x89f1, &(0x7f0000000080)={'syztnl0\x00', &(0x7f0000000180)={'erspan0\x00', 0x0, 0x8000, 0xc0, 0x4bffa6b6, 0xc33, {{0x30, 0x4, 0x2, 0x10, 0xc0, 0x64, 0x0, 0x0, 0x4, 0x0, @local, @multicast1, {[@lsrr={0x83, 0x27, 0x88, [@loopback, @remote, @dev={0xac, 0x14, 0x14, 0x1f}, @dev={0xac, 0x14, 0x14, 0x39}, @loopback, @multicast2, @remote, @private=0xa010100, @broadcast]}, @cipso={0x86, 0x31, 0xa68f1957ca170c49, [{0x7, 0x5, "2b68b5"}, {0x0, 
0xc, "bd6a909a9899389fcb78"}, {0x1, 0x2}, {0x7, 0xc, "317b8617345c148f4008"}, {0x6, 0xc, "e48931abd84a84bf40fc"}]}, @ra={0x94, 0x4, 0x1}, @rr={0x7, 0xb, 0xf9, [@private=0xa010100, @multicast1]}, @timestamp_addr={0x44, 0x44, 0xc1, 0x1, 0xa, [{@rand_addr=0x64010100, 0x9}, {@initdev={0xac, 0x1e, 0x1, 0x0}, 0x1000}, {@multicast1, 0x8001}, {@rand_addr=0x64010101, 0x40}, {@local, 0x2000}, {@initdev={0xac, 0x1e, 0x0, 0x0}, 0x1}, {@initdev={0xac, 0x1e, 0x0, 0x0}, 0xfffffff8}, {@private=0xa010100, 0x5}]}]}}}}}) sendmsg$nl_route_sched(r4, &(0x7f0000000100)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f00000000c0)={&(0x7f0000000400)=@newtaction={0x6dac, 0x30, 0x200, 0x70bd2c, 0x25dfdbfb, {}, [{0x4d4, 0x1, [@m_mpls={0x14c, 0x17, 0x0, 0x0, {{0x9}, {0x60, 0x2, 0x0, 0x1, [@TCA_MPLS_TC={0x5, 0x6, 0x7}, @TCA_MPLS_LABEL={0x8, 0x5, 0xe1cc8}, @TCA_MPLS_LABEL={0x8, 0x5, 0xbb4b3}, @TCA_MPLS_PARMS={0x1c, 0x2, {{0x401, 0x7fffffff, 0x7, 0x3, 0x6}, 0x2}}, @TCA_MPLS_LABEL={0x8, 0x5, 0x8ff28}, @TCA_MPLS_PROTO={0x6, 0x4, 0x88f5}, @TCA_MPLS_LABEL={0x8, 0x5, 0xa9f14}, @TCA_MPLS_PROTO={0x6, 0x4, 0x9000}, @TCA_MPLS_TC={0x5, 0x6, 0x6}]}, {0xc1, 0x6, "8ba73108021a0532b17f53d4d7d5a4a5b7f12647d76baa978c4eb1b9ddc7f67e6bf7989c39079c02983698608144fafb09425343ca58f160fd1a06456cfbe30512db8afb3804f0194840271a965ce77c7dbd9590b46202e83d6c119bd091833989e4c04635f80869f68544fd33310f22542e6b10a391f443b32037d18aad683c90d84cc1e300f778c18e202534caa59ba39b30b514d1e5d9bc356c1c3f42acf56c09986096b019504fefb866326a18ca4020383a28736f7f7eb7441c21"}, {0xc, 0x7, {0x1, 0x1}}, {0xc, 0x8, {0x0, 0x1}}}}, @m_xt={0xc0, 0x15, 0x0, 0x0, {{0x7}, {0x14, 0x2, 0x0, 0x1, [@TCA_IPT_INDEX={0x8, 0x3, 0x5}, @TCA_IPT_INDEX={0x8, 0x3, 0x3}]}, {0x86, 0x6, "6fff40c44e65569c9d45c0be51936f5ccfe37efcd474a9d04aa434d88165ff17625bb184058fc9d66ae35373254d6a83cb8d18244c613b9dea81bda7d79abc2fe88eb0a61bb9ab1c62637bc7ffc5b37b9e0d89aa0f920c1943e94594aee3f5641245060900abb6f384cc585afdb7f02557b4bf010b22cc80bd82f065df522e44075e"}, {0xc, 0x7, {0x0, 0x1}}, {0xc, 0x8, {0x1, 0x3}}}}, @m_connmark={0xfc, 0x9, 0x0, 0x0, {{0xd}, {0xac, 0x2, 0x0, 0x1, [@TCA_CONNMARK_PARMS={0x1c, 0x1, {{0x4, 0x80000001, 0x5, 0x8, 0x6}, 0xa89}}, @TCA_CONNMARK_PARMS={0x1c, 0x1, {{0x8, 0x963e, 0x5, 0x703f, 0x9}, 0x8}}, @TCA_CONNMARK_PARMS={0x1c, 0x1, {{0x25f0, 0x400, 0x2, 0xfffffffc, 0x8}, 0x9}}, @TCA_CONNMARK_PARMS={0x1c, 0x1, {{0x3ff, 0x9, 0x8, 0x7, 0x6}, 0x4}}, @TCA_CONNMARK_PARMS={0x1c, 0x1, {{0x6, 0x29, 0x6, 0x6, 0x2}, 0xca5e}}, @TCA_CONNMARK_PARMS={0x1c, 0x1, {{0x5, 0x7f, 0xffffffffffffffff, 0x1, 0x81}, 0x2}}]}, {0x24, 0x6, "c387dbe2c6dd6d34897459526e144ec5ac3b8c9b8218f27fea4b843326c185f9"}, {0xc}, {0xc, 0x8, {0x1, 0x2}}}}, @m_mpls={0xb8, 0x1a, 0x0, 0x0, {{0x9}, {0x24, 0x2, 0x0, 0x1, [@TCA_MPLS_TTL={0x5, 0x7, 0x40}, @TCA_MPLS_PROTO={0x6, 0x4, 0x88f8}, @TCA_MPLS_TTL={0x5, 0x7, 0x46}, @TCA_MPLS_TTL={0x5, 0x7, 0x8e}]}, {0x6c, 0x6, "3bc9286a2fe4c2e2c932b2e656d8583eec41af4595023cc4d6b4de02be435be75708fab2f04cce87a1467245514e2ffb90316bb58d2657ebfb644e7706519a9f76832274acdd474324a355883defa9275b65692ea3683eba43fe21e523ce14690d14eace71b7a2fa"}, {0xc, 0x7, {0x1}}, {0xc, 0x8, {0x2, 0x2}}}}, @m_ctinfo={0x110, 0x8, 0x0, 0x0, {{0xb}, {0x4c, 0x2, 0x0, 0x1, [@TCA_CTINFO_PARMS_DSCP_MASK={0x8, 0x5, 0x86c}, @TCA_CTINFO_PARMS_DSCP_MASK={0x8, 0x5, 0xd09}, @TCA_CTINFO_ACT={0x18, 0x3, {0x3968, 0x3f, 0x0, 0xe1ae, 0x6}}, @TCA_CTINFO_PARMS_DSCP_MASK={0x8, 0x5, 0x9e}, @TCA_CTINFO_ACT={0x18, 0x3, {0x3, 0x3, 0x5, 0xfffffffe, 0x2408eaf6}}]}, {0x99, 0x6, 
"b5019a429f1cc5a17c713a62d95586ce546dabd8bc0cc16edfab0f71357cdb428381534a495848655ed999837fbe8d129f7f5e9e5014bf54d0136fcd5f3809260b465f1fb0b584c62289695fe7faf1d51fabbff0fccc785b7e49a04ea9d450fce2c64cf7175dfbb43de8da60f33255c9a950a36c7393df50a22f64da4072c38184630f75ab1f1a3ac4c866a96054f115cd5a04dfd1"}, {0xc}, {0xc, 0x8, {0x2, 0x3}}}}]}, {0x60c, 0x1, [@m_connmark={0x158, 0x18, 0x0, 0x0, {{0xd}, {0x3c, 0x2, 0x0, 0x1, [@TCA_CONNMARK_PARMS={0x1c, 0x1, {{0x3f, 0x1, 0x10000000, 0x20, 0xed3a}, 0x1}}, @TCA_CONNMARK_PARMS={0x1c, 0x1, {{0x7, 0x2, 0x20000000, 0x9, 0x4971}, 0x1}}]}, {0xed, 0x6, "ef1e8908a063b1a4702405531a175033a476526dfa656a00d505677b10ee9ff65f780e80bc862661ffceeffd5a10e8d32a1fe08e5b24b0fb87a4812d8ee82375928c1a9ac08af2c6dd1e378e3d21b24b555cc06166f2e8a1a92b8fa2be12b5cec402d27706362816ddafb2ee918c507c788baf60d8c3ce1480db393bd8e8d7562ad84551395f6a163b3a74be6648ba27b378274440f2173dd1d70ca8b7404ccc9dbe74ae4c115ac0996b02ca3cb472666b41a88d3bb5d4cd87e52e9accba0a28d37fb7a1ea027007a7a3083249bdaf7175ef8dac9fb255ee6341a89271a703fb537a45d658df889df9"}, {0xc, 0x7, {0x1}}, {0xc, 0x8, {0x1, 0x2}}}}, @m_vlan={0x60, 0x12, 0x0, 0x0, {{0x9}, {0x30, 0x2, 0x0, 0x1, [@TCA_VLAN_PUSH_VLAN_ID={0x6, 0x3, 0x88e}, @TCA_VLAN_PUSH_VLAN_PROTOCOL={0x6, 0x4, 0x88a8}, @TCA_VLAN_PARMS={0x1c, 0x2, {{0x0, 0x200, 0x0, 0x2, 0x4}, 0x3}}]}, {0x7, 0x6, "80c4b0"}, {0xc, 0x7, {0x1}}, {0xc, 0x8, {0x3, 0x3}}}}, @m_csum={0xe8, 0x1c, 0x0, 0x0, {{0x9}, {0x20, 0x2, 0x0, 0x1, [@TCA_CSUM_PARMS={0x1c, 0x1, {{0x43, 0x2, 0x3, 0x4, 0x4ed}, 0x7c}}]}, {0x9e, 0x6, "8d2d1a4fc178bdd9af7215770320ee5d189796b9523885d646b9faad72171995ba0cb9b53f04e5fed40ddf5887ad50a044d785a2b6e11736e8f506ee3925b6cd964a3c754d5e75751422e595c3a681e54e8dbd9c573a21a9124392cff8b41422d1cceefa07091cda24e9e334d5b83cdeecfbd53be23d85a20c1e27855b9b1bdccb118e59bdb68e52b174d58f58c1014ef0a59d0a280533d7c5c2"}, {0xc, 0x7, {0x1, 0x1}}, {0xc, 0x8, {0x1, 0x3}}}}, @m_ct={0x120, 0x13, 0x0, 0x0, {{0x7}, {0x64, 0x2, 0x0, 0x1, [@TCA_CT_PARMS={0x18, 0x1, {0x7, 0x0, 0x3, 0x9, 0x1}}, @TCA_CT_MARK_MASK={0x8, 0x6, 0x9}, @TCA_CT_NAT_IPV6_MIN={0x14, 0xb, @remote}, @TCA_CT_NAT_IPV4_MIN={0x8, 0x9, @multicast1}, @TCA_CT_LABELS={0x14, 0x7, "e817e941fccd601a0241835ba7bf6acf"}, @TCA_CT_ACTION={0x6, 0x3, 0x1a}, @TCA_CT_NAT_PORT_MIN={0x6, 0xd, 0x4e24}]}, {0x97, 0x6, "c1220e078d92ad552f095b77da0c43c010315f85d69609130bd223c1257ca62f2ea94b2203ff230898e2afe5c7175f57dde2aeee8bd716cd86c1bec5275aeadb78c16e27aaaa47b0d1ca3142be7316e89d44ae4d73148aad37fb64ab91fd7289c874d6bcbfb874bace16f13e8f04cedff00022370917f89069a874267853f3a83697bd7fdce8c59761c1d90ef95d4f4d246db3"}, {0xc, 0x7, {0x1}}, {0xc, 0x8, {0x1, 0x3}}}}, @m_bpf={0x11c, 0x11, 0x0, 0x0, {{0x8}, {0x10, 0x2, 0x0, 0x1, [@TCA_ACT_BPF_NAME={0xc, 0x6, './file0\x00'}]}, {0xe5, 0x6, "6b9dab9f6fd939cc8613e252c3955e11b7698e4be3ce6e84222a2d535aea2af1288c77e2f1c4d06fa5cc837a86730cd7b37f67ed44c6596d420101516176be79b9489b878142e831fdc105abbf8da0255781d0e58195d79d80c71d8bc1e3b8bc515b8d6e2968e075897670729dbba840171bb38595cca57fe1982b190646ad4c6129c808b32748adac3b9f6bdaf889e962e822ac10869126c0190aaec2637a5b0fafdc782835d05ffc83f312d2ef25031e02ad44cd5e09ad29ace41583b95517ca021d03cdf7367eb9eb680038c4664bd407dfaf73c919ced26f722c160b2c6254"}, {0xc, 0x7, {0x0, 0x1}}, {0xc, 0x8, {0x1, 0x2}}}}, @m_ct={0x12c, 0x1f, 0x0, 0x0, {{0x7}, {0x20, 0x2, 0x0, 0x1, [@TCA_CT_NAT_IPV6_MIN={0x14, 0xb, @private2={0xfc, 0x2, '\x00', 0x1}}, @TCA_CT_ZONE={0x6, 0x4, 0x15}]}, {0xe5, 0x6, 
"3a6787e64b422053ee5a680a0634a4b4209e0f088a82c84842852c43fd1fc291c1bb3da34346ec12fa321aceed055f6f89391bcc05c4663003f2339afa7966cbccef2f42731d34f0efa4d7c7c5e03fa0b9203065eabdc9449b13a2ba3a2194d85aa1903324808e338b6ebd6dfff0b08e93fc6ebb456e36033756efc255d0c6629d64a8918c5ef04c2fb6da65a2015dc73cd1a5c46b09f4b8f488a26d523d2d639cc89bcd4e458c6b4c67572f164eabd9f1f80b03435a04c78b579be348e9d5c3a11d234bb763e0065b39e72859d95e36515c1db55ef47c1bed4d051ceefdc2ea79"}, {0xc, 0x7, {0x1}}, {0xc, 0x8, {0x1, 0x3}}}}]}, {0x1b1c, 0x1, [@m_nat={0x15c, 0x20, 0x0, 0x0, {{0x8}, {0x54, 0x2, 0x0, 0x1, [@TCA_NAT_PARMS={0x28, 0x1, {{0x8, 0x6, 0x2, 0x100, 0x10001}, @broadcast, @multicast1, 0xff000000, 0x1}}, @TCA_NAT_PARMS={0x28, 0x1, {{0x1, 0x0, 0x7, 0x8, 0x233}, @broadcast, @remote, 0xffffff00}}]}, {0xe3, 0x6, "0e91a33d18b46768bf314c74682c8c90d6e614cd533cdb45d483fe747a83398983b4f9f116baf43e65d6a6274d035ea99ede57842a5937ae74998cb4bb7498eb79c5c691a480758d3a178707c20a0ed7943bd91ab813caeb2b87a46149a6ac6331d350f99c74ec334003de2dfafa49fe6254530a6282bcfd2a4499321b9fa752dab47b79eaccede9200302236238f2df99d801a91367aae0c23691970e7eeefa52e947b154df370c59d901c3ba7ce382ddfaa5c2e2cc56864f528a6dc79110edc484a210f4e8fd6c1fafd2ecd91fbcd022aa2eeae993fc9132bddb8b5405fa"}, {0xc, 0x7, {0x1}}, {0xc, 0x8, {0x2, 0x1}}}}, @m_police={0x1274, 0xc, 0x0, 0x0, {{0xb}, {0x117c, 0x2, 0x0, 0x1, [[@TCA_POLICE_RESULT={0x8, 0x5, 0x6}, @TCA_POLICE_PEAKRATE={0x404, 0x3, [0x2, 0x8, 0x2, 0x5, 0x20, 0x6, 0x2, 0x7da, 0x2, 0x3, 0x1ff, 0x800, 0x0, 0x800, 0x7fffffff, 0x4, 0x101, 0x40, 0xff, 0xe5, 0x0, 0xa2c0, 0x6, 0x100, 0x7f, 0xfa, 0x1, 0x5, 0x1, 0x5, 0xd0e, 0xcc, 0x80000000, 0xaf, 0x9, 0x6, 0xffffff7a, 0x0, 0x473d, 0xe79, 0xc2bf, 0x9, 0x4, 0x4, 0x6c03d9d6, 0x81, 0x6, 0x7fffffff, 0x6, 0x1, 0xa2d, 0x8625, 0x7fff, 0xffffff01, 0x80000000, 0x0, 0x3, 0x1, 0x0, 0x9, 0x0, 0x2, 0x1000, 0x2, 0x6204, 0x5, 0x8, 0x8, 0x0, 0x4, 0xfffffffc, 0x800, 0x502, 0x766, 0x1ff, 0x80000000, 0x7, 0x5, 0xff, 0x6, 0x7ff, 0x8000, 0x7, 0x7, 0x8, 0x20, 0x8001, 0x9, 0xfffd, 0xca5c, 0x6, 0x2, 0x3, 0x2, 0x3, 0x6, 0x9, 0xfffffff7, 0xffff, 0x7, 0x7ff, 0x80, 0x1ff, 0x7, 0xd62d, 0xf00, 0xffff, 0x0, 0x101, 0x0, 0x1, 0x400, 0x6, 0x7fff, 0x3c, 0xd0f, 0xfffffff9, 0xee, 0x2, 0x1, 0xffffffff, 0x6, 0x3, 0x2, 0xffffffff, 0x72f3, 0x236, 0x0, 0x9, 0x3, 0x7fffffff, 0x20, 0x4, 0x1, 0x9, 0x9, 0x7, 0x3f, 0x3, 0x9, 0x7b, 0x83c, 0x4, 0x1, 0x7f, 0x400, 0x0, 0x1, 0x1, 0x100, 0x8, 0x8001, 0x6c2a, 0x7fffffff, 0x10000, 0x401, 0x0, 0x3, 0x1ff, 0x3, 0x4, 0x200, 0x5, 0x3, 0xefb, 0x1, 0x1, 0x7, 0xff, 0xe7b3, 0x148, 0x2, 0x401, 0x7, 0x7, 0x9, 0xfffffcaa, 0x3, 0x401, 0x6, 0x8001, 0x9, 0xd724, 0x5, 0x426, 0x3, 0x2, 0x8, 0x64, 0xfff, 0x3ffc, 0x0, 0x0, 0xe1, 0x9, 0x10000, 0x6, 0xfe000000, 0xae31, 0x41, 0xffff, 0x10000, 0x3f, 0x80000000, 0x200, 0x9, 0x0, 0x3, 0xffffffff, 0x101, 0x5, 0x8000, 0xff, 0x3f, 0x8, 0x7fff, 0x7fff, 0x8, 0x0, 0x2, 0x4, 0x4, 0x800, 0x169b, 0x0, 0x3, 0x2, 0x1f, 0x9, 0x9, 0x6, 0x4, 0x3e000000, 0x3f, 0x6, 0x8, 0xf195, 0x9, 0x101, 0x8, 0x24000, 0x1, 0x1, 0x2, 0x8, 0x6, 0x2, 0xffffff7f, 0x5c34, 0xffffffff, 0x5e22339, 0x800, 0x4, 0x7]}, @TCA_POLICE_AVRATE={0x8, 0x4, 0x17}, @TCA_POLICE_RATE64={0xc, 0x8, 0x4}, @TCA_POLICE_RATE64={0xc, 0x8, 0x4}, @TCA_POLICE_RATE={0x404, 0x2, [0x2000000, 0x6, 0x3, 0x49c2, 0x2, 0x7, 0x8001, 0xfffffffb, 0x60f, 0x2ee, 0x7, 0x4, 0xffffffff, 0xc14, 0x0, 0x8, 0xff, 0x3, 0x73a4, 0x200040, 0x8000, 0x6, 0xcc, 0x87e, 0x6, 0x7, 0x2, 0x7f, 0xffff, 0x7, 0x7, 0x140000, 0x8001, 0x8001, 0x5c5, 0x3f, 0x9, 0xda37, 0x5, 0x101, 0x5, 0x9d, 0x4, 0x64, 0x9, 0xc4b, 0x1, 0x8000, 0x100, 0x9, 0x7fffffff, 0x5156, 
0x5, 0x8001, 0x80000000, 0x1, 0x7, 0x7, 0x1ff, 0xffff, 0x3, 0x5, 0xdc16, 0xfffffff8, 0x51, 0x800, 0x83, 0xffff2fdc, 0x7fffffff, 0x10001, 0x7f, 0x100, 0x1, 0x8, 0x6, 0xc0, 0xff, 0x7fffffff, 0xffff0000, 0x0, 0x9, 0x0, 0x0, 0xffffffff, 0x1, 0xfe0, 0x71, 0x10001, 0x0, 0xf27, 0x8, 0x10001, 0x8000, 0x7, 0x80000000, 0xb5, 0x7fffffff, 0x913, 0x0, 0x0, 0x8f7, 0x7, 0x7, 0x88, 0x1, 0xffffffff, 0x0, 0xfffffffe, 0x3, 0x6, 0x7, 0x0, 0x3, 0x1, 0x2, 0x0, 0x4, 0xbd06, 0x0, 0x60, 0x2, 0x8aec, 0xffff8000, 0x1ff, 0x1000, 0x8, 0x6, 0x8, 0x20, 0x8, 0x56, 0xa2a, 0x2, 0xad1b, 0x82f4, 0x6, 0x24f, 0xffff, 0x0, 0x80, 0x4, 0x7ff, 0x5, 0xfffff001, 0x4, 0x5, 0x9, 0x5, 0x2, 0x400, 0x6, 0x4, 0x401, 0xfffffff9, 0x1, 0x3, 0x3f, 0x1f, 0x3, 0x2, 0x100, 0x8, 0x40, 0x0, 0x8, 0x3, 0x20, 0x897, 0x5ad, 0x5c2d, 0x6, 0x101, 0x4, 0x10001, 0x571, 0x400, 0x7, 0xfffffff8, 0x1, 0x5, 0x1, 0x95a, 0x10001, 0x8, 0x9, 0x1, 0x1a05, 0xbe, 0x10000, 0x6, 0x4f, 0x7, 0x3ff, 0x3f, 0x8, 0x2, 0x2, 0x389, 0x114c, 0x9, 0x8, 0x0, 0x20, 0x1, 0x4, 0x3, 0x0, 0x0, 0x0, 0x80, 0x2, 0x2, 0xfffffffe, 0x1f, 0x1, 0x1, 0x2, 0x6, 0x2e72, 0x5ca, 0x8001, 0x4, 0x9, 0x100, 0x80000001, 0x7f, 0x5, 0x7, 0x3, 0x6, 0x1, 0x6, 0x0, 0x9, 0x0, 0x8, 0x0, 0x0, 0x5, 0x6, 0x7fff, 0x1, 0x80000000, 0xb45, 0x7, 0xd2, 0xa2, 0x6, 0x0, 0xffff, 0x5, 0x46e, 0x7fffffff, 0x5, 0x4]}, @TCA_POLICE_TBF={0x3c, 0x1, {0x3, 0x20000000, 0x101, 0x800, 0x100, {0x0, 0x2, 0x9, 0xffff, 0x9, 0x9}, {0x0, 0x1, 0x2, 0x4, 0x5, 0x8001}, 0x1, 0xfffeffff, 0x4}}], [@TCA_POLICE_RESULT={0x8, 0x5, 0x6f}, @TCA_POLICE_TBF={0x3c, 0x1, {0x3, 0x0, 0x81, 0x4, 0x9, {0xff, 0x1, 0x1997, 0xffff, 0x2635, 0x40}, {0x3, 0x2, 0xfff, 0x3, 0x1, 0x7}, 0x5, 0x6, 0x436}}, @TCA_POLICE_RATE64={0xc}, @TCA_POLICE_RESULT={0x8, 0x5, 0x4}, @TCA_POLICE_TBF={0x3c, 0x1, {0xffffffff, 0x7, 0x3, 0x3ff, 0x64000000, {0x7f, 0x2, 0x8000, 0x8001, 0x2, 0x19}, {0x9, 0x3, 0x4, 0x85a, 0x0, 0x6656}, 0x5, 0x3ff, 0x81}}, @TCA_POLICE_AVRATE={0x8, 0x4, 0x7}, @TCA_POLICE_AVRATE={0x8, 0x4, 0x5}], [@TCA_POLICE_AVRATE={0x8, 0x4, 0x8}, @TCA_POLICE_RATE64={0xc, 0x8, 0x9}, @TCA_POLICE_AVRATE={0x8, 0x4, 0x6}, @TCA_POLICE_TBF={0x3c, 0x1, {0x4, 0x2, 0x80000001, 0x3, 0x6, {0x6, 0x2, 0x3ff, 0x0, 0x7fff, 0x5}, {0x4, 0x1, 0xf456, 0xffff, 0x35, 0x40}, 0xff, 0x8, 0x8}}], [@TCA_POLICE_RATE={0x404, 0x2, [0x2, 0x7fff, 0x5, 0x3, 0x4fb, 0x7ff, 0x0, 0x3ff, 0x7, 0xffff, 0x3, 0x1ff, 0xbf, 0x7fffffff, 0x8, 0xff, 0x8000, 0x80000001, 0x0, 0x2, 0x80, 0xfffffff9, 0x7, 0x8, 0x1000, 0x7, 0xa8a6, 0xfffffffb, 0x0, 0x0, 0x1c, 0xf11c, 0x101, 0x800, 0x1, 0x1, 0x7ff, 0x0, 0x7, 0x8, 0x0, 0x200, 0x6, 0x9b0c, 0x3, 0x2, 0x9, 0x9, 0x2, 0xafd, 0xffffffff, 0x3, 0x0, 0xf10, 0x2, 0x80000001, 0xa3bc, 0xfff, 0x2, 0x100, 0x3, 0x8, 0x1, 0xffff8c67, 0xbb6b, 0x3, 0x800, 0xfffffff9, 0x8, 0x34, 0xffffffff, 0x4, 0x40, 0x7, 0x0, 0x2, 0x2, 0x2, 0xfffffffe, 0x9, 0x5, 0x3f, 0x2, 0x6, 0x401, 0x8, 0x72, 0x5, 0x529, 0x4, 0x76d, 0xd47, 0x7, 0x8, 0x8001, 0x7, 0x5, 0x80000000, 0x81, 0xffff, 0x16b7, 0xd0, 0x8001, 0x5, 0xb5, 0x8001, 0x7, 0x5a, 0x10001, 0x3, 0x100, 0x0, 0x8000, 0x8, 0x8, 0x2, 0x1, 0x6, 0xffff, 0x1, 0xb8, 0x3f, 0x1, 0x3, 0x6, 0xa1a, 0x7, 0x6, 0x120e, 0xfffffff8, 0x2, 0x1, 0xf7, 0x2, 0x6, 0xfffffff8, 0x0, 0x7, 0x5, 0xfffffffc, 0x7ff, 0x9c, 0x110, 0x9, 0x562, 0x4e80, 0x0, 0x0, 0x9, 0x6, 0x5a0, 0x400, 0x30, 0xffff8fef, 0x8000, 0x800, 0x7ff, 0xffffffff, 0x3, 0x8, 0x2, 0x20, 0x63, 0x9, 0x8001, 0x4, 0x6, 0x4, 0x3e0000, 0x8001, 0xfffffffb, 0x0, 0x3f, 0x800, 0x1, 0x1487, 0x1ff, 0x5, 0xac80, 0x1, 0x74b7, 0x800000, 0x80000001, 0x80, 0x8, 0x8, 0xfffffffd, 0x400, 0x54, 0x0, 0x4, 0x200, 0x8, 0x5, 0x2, 0x5, 0x6, 0x7f, 0x3, 0x0, 
0x1000, 0x4, 0x33, 0x7f, 0x8, 0x974, 0x3, 0x1, 0x3, 0x6, 0x9, 0xab, 0x800, 0x100, 0x1, 0xf30, 0x10001, 0x2, 0x9, 0x0, 0x2, 0x5, 0x9, 0x401, 0x4, 0x8, 0x0, 0x180000, 0xf8, 0x10001, 0x1000, 0x7ff, 0x159, 0x3, 0x540fe27a, 0x8, 0x9, 0x8001, 0x4, 0x7, 0x4, 0x6, 0x4, 0x4, 0x3c, 0x7, 0x1000, 0x97000000, 0x101, 0x0, 0xffff8001, 0x2, 0x4, 0xffffffc1, 0x88ae, 0x4]}], [@TCA_POLICE_RATE={0x404, 0x2, [0x7, 0x831cd8b, 0x9, 0x2, 0x2, 0x52, 0x4, 0x3913, 0x1, 0x2, 0xffffff0a, 0x9, 0x1, 0x1, 0xffff0, 0x3a137682, 0xff, 0x9, 0x251fc07a, 0x4, 0x80, 0x605d, 0xff, 0x6, 0x0, 0xffff, 0xe1, 0x0, 0x0, 0x20, 0x0, 0xfffffffa, 0x5, 0xa7, 0x15, 0x101, 0x36d, 0x9, 0x1, 0xffffffff, 0x3, 0x5bb, 0x8000, 0x4, 0x400, 0x2d9, 0x6, 0xd0d, 0x2, 0x4, 0xfffffffc, 0x5, 0x5, 0x1, 0x63952b1b, 0xff, 0x3, 0x16a73061, 0x3, 0x2, 0x8, 0x9e, 0x0, 0xfff, 0x68, 0x7, 0x4, 0xe9b5, 0x80, 0x5, 0xffffff17, 0x1d0000, 0x1, 0x2, 0x2, 0x8, 0x7, 0x8000, 0xd00, 0x4b0f, 0xffff0001, 0x670, 0x8, 0x81, 0x4, 0x8, 0x7ff, 0x7ff, 0x6, 0x4, 0x20, 0x1, 0x1, 0x400, 0x0, 0xffff, 0x0, 0x7, 0x3, 0x9, 0x4, 0x8, 0x5, 0x9, 0x400, 0x0, 0x80000000, 0x9, 0x8, 0x8, 0x525f, 0x7fff, 0x7, 0x8, 0x6, 0x1, 0x6, 0xa0db, 0x3f, 0x4e07, 0x6, 0x100, 0x1, 0x6, 0xfff, 0xfffffffa, 0xf2, 0xf10, 0x7, 0x5, 0x7, 0x2, 0xc2, 0x3, 0xff, 0x0, 0xffffffff, 0x7, 0x800, 0x5, 0x3, 0xc7, 0x1000, 0x467, 0x2, 0x9, 0x4, 0x5463, 0x153, 0x3, 0x1000, 0x8, 0x9, 0xffffffff, 0x8, 0x3, 0x0, 0x8a0, 0x7fffffff, 0x8, 0x5, 0xe3, 0xdb, 0xf703, 0x86f, 0x3f, 0x1f, 0x7fffffff, 0x8, 0x1, 0x7, 0xffff0d5e, 0x5, 0x9, 0x81, 0x1, 0x22, 0x1, 0x5, 0xf, 0x10000, 0xfffffe01, 0x8000, 0x76, 0x6, 0x7, 0x5ec66bf2, 0x7fff, 0xfffffffc, 0x1f, 0x7, 0x97c, 0xa333, 0x0, 0x1ff, 0x101, 0xff6a, 0x0, 0x46, 0x9, 0x80, 0x10001, 0x3, 0x1, 0xf338, 0x4, 0x10001, 0x5, 0x9, 0x2, 0x7ff, 0x62b4, 0x9, 0x1, 0x7, 0x200, 0x3, 0x81, 0x1, 0x7f, 0xffffffff, 0x2, 0x3, 0x0, 0xffffff01, 0x8, 0x7, 0x9, 0x1, 0x20, 0x80000001, 0x6, 0x7, 0x4, 0xfffffffa, 0xb3, 0x1, 0x3, 0x1000, 0x9, 0xe17, 0x8, 0x5, 0x39, 0x6, 0x10001, 0x3e, 0x77f2, 0x9, 0xffffffff, 0x62, 0x7f, 0x8, 0x1ff, 0x6, 0xfffffff9]}, @TCA_POLICE_RESULT={0x8, 0x5, 0x80}]]}, {0xcd, 0x6, "71fdecd6a4f4fb667fc46b473ccf4401974ede14ce09a5bd8a789176a30fba411269897ba5a13f9152007d72838401e04488571a2fd25cb34693b76fc0d34200f3e3d0024c706661d4245eeecadcd51e3a3b5861a4ffbfb58f40134829ac521727d993cb95dc55a9fd053c1713597e41e5ae46a345990548d39c5b63ddd20f357ecc037dc6b6d09c94e3099066a7256df67b0a27383f86b39614d349a21a19bdcd577fa190b9fd9f4d941a0a276b236f22234fb5f67cc87f39a2652cc0b97152ba7ac752eeced6736c"}, {0xc, 0x7, {0x1}}, {0xc, 0x8, {0x1}}}}, @m_pedit={0xbc, 0x1a, 0x0, 0x0, {{0xa}, {0x3c, 0x2, 0x0, 0x1, [@TCA_PEDIT_KEYS_EX={0x18, 0x5, 0x0, 0x1, [{0x14, 0x6, 0x0, 0x1, [@TCA_PEDIT_KEY_EX_HTYPE={0x6, 0x1, 0x3}, @TCA_PEDIT_KEY_EX_HTYPE={0x6, 0x1, 0x3}]}]}, @TCA_PEDIT_KEYS_EX={0x20, 0x5, 0x0, 0x1, [{0x1c, 0x6, 0x0, 0x1, [@TCA_PEDIT_KEY_EX_HTYPE={0x6, 0x1, 0x1}, @TCA_PEDIT_KEY_EX_CMD={0x6, 0x2, 0x1}, @TCA_PEDIT_KEY_EX_CMD={0x6, 0x2, 0x1}]}]}]}, {0x55, 0x6, "e8b714955aff02beba9f979ee73dad360e4744faed71507eeea46ae5cc0a7feb948665465597cfc2ac08a2b3d689480cb6b28b706d3dfd131a1d82f8b0ccd0ad944b95f9755b2d835893f837a1782c3ae9"}, {0xc, 0x7, {0x1, 0x1}}, {0xc, 0x8, {0x2, 0x3}}}}, @m_xt={0x4ac, 0x13, 0x0, 0x0, {{0x7}, {0x3ac, 0x2, 0x0, 0x1, [@TCA_IPT_INDEX={0x8, 0x3, 0x5}, @TCA_IPT_TARG={0xf7, 0x6, {0x400, 'security\x00', 0x3f, 0x401, 
"6e355aabb9a96176f9f1298b7364f73780a79ab038faa790a6c2e06293087fc73c1493b99b9bb9b1c3cbe0fa3ac232992561b88403893acd340f47b53da247ceaf0840a9b1fa846ff434e17b5bf74b89a15a857bba848203c72a0f3bd6b5cd42aef06569b882542732a14f4c9f08485d92723ceb4a9236427174e9f0a6cd49a30590a03e77e2ba9d93aef89bc08712c0fed4109e169bbfd559116abb25790c7f721024fc0501cadc59c72aef7ebe7a88082aea0c581bfb52a56af0157cda37e481086b3eee2dfbf628717ea302"}}, @TCA_IPT_INDEX={0x8, 0x3, 0x20}, @TCA_IPT_TARG={0x11a, 0x6, {0x1, 'nat\x00', 0x4, 0x100, "95b759431547ace99f61372e00e4a99ac7bd9849d3f71efcd7b2c2162ffabdfa9c94b20125edd7eb65b749af0a13fb50f796d31a5bd05c912d017ae0330befa01b045b394103668188720e2694925636d0ebd9592bf8131ee4e68163739ba55708bbcfda3dd35579ee5d6d2d8b879aa121d56333f901a5d108abbf7e5bcdd7cec2e14c2f98a790e60f9f67250096f352a9a97ed83d01f725a1d47a27482334701a0c0ae97a84f0fff574572bbba548600f116fc9d5932f10a1d5555f6cf7f9a8685c8753c46dfd964d84f06f473e93346f6a5300ed4e7f82dc1bc704f45840795e8991e970f276a93b0247187820224b"}}, @TCA_IPT_TARG={0x9c, 0x6, {0x5, 'nat\x00', 0x42, 0x3, "b5c258996c0dec54f66b2adbb42bd42f61831ce342304b84aeb3710348536ce78c413e9a204ca951e0badc055c59963dcad0a5ede5df17e54db96eb5cb87c1fa2ba6ad22c8f646cf96c5e04da06863edf81c228fea3fc7b2ed622344c750ef8f3e2d6401a8d7cb1e204b3c893d10987f0f23"}}, @TCA_IPT_INDEX={0x8, 0x3, 0x1b50}, @TCA_IPT_TABLE={0x24, 0x1, 'filter\x00'}, @TCA_IPT_TARG={0xbb, 0x6, {0x4, 'filter\x00', 0x0, 0x200, "2e14dc42a439f686faafdca2af378fd9721362dab9f2bb2da2fa7ae34b609d21a3575b9a10235a0ee99524d91d8a7c32cf5e369f364429d6b618a1ab2211968b323cb8c8e573293aca15125d3979de48e647a72c967a1030ed8cf6a0dbc770bce2d2867008f80de2ca1af384fb33a02f9f3959e42c758a9677b78d35f23438dcd3b5e5b3e3de15e1887c7f6f88ac206101"}}]}, {0xd9, 0x6, "b93d34c391cb4d535592434f9a7c9d68135fe32abd27380f1d64ca820a986cbdff0300df727037654e1f2362e8154d8d5a535190cfe9627aabfb6c17be6feb67f0ef7b31a295c6bbb4fd3eae2ae784e4c66cfedf7b1a050aa4d93368b43825dfdf46906f7130dc4b70337be98cabade89e90eee6be4717611aee366e9c4d156ae1303f164bdde6e22de23d188c48351b0068a76fd3fb3e4278ba8f9412c92c179554a234ea624bf4b0aa6637ddd410e55680c7acb904e934dcbb9005e66b4fdb9f8f3aa55dad4f63f25190a71a6a102661e3225b80"}, {0xc, 0x7, {0x0, 0x1}}, {0xc, 0x8, {0x3, 0x1}}}}, @m_nat={0x174, 0x1f, 0x0, 0x0, {{0x8}, {0x7c, 0x2, 0x0, 0x1, [@TCA_NAT_PARMS={0x28, 0x1, {{0x2, 0x5, 0x3, 0x0, 0x7e}, @loopback, @rand_addr=0x64010101, 0xffffff00}}, @TCA_NAT_PARMS={0x28, 0x1, {{0x1, 0x53e0b663, 0x3, 0x2, 0xa4}, @local, @dev={0xac, 0x14, 0x14, 0x2c}, 0xff000000}}, @TCA_NAT_PARMS={0x28, 0x1, {{0x200, 0x80, 0x3, 0x9, 0x7}, @empty, @rand_addr=0x64010100, 0xff, 0x1}}]}, {0xd3, 0x6, "c524e609445b859e6a27c4a7f8c7d1bae3701acd22365933aa623d0092aea7abb88d4a33fd111e60cafc6d66feb7413f7134371f62c6cd0b8263470d0ba0452e82f9a1b328278e5273a0aa10b3a8788ddd6d25fca7cf9bf7f1d271293885a7ca0e13bbfba90406ee5ac96c28add0007dfc422ad3e166b7b14ed6a7b4f4de78dc1d1da095e9487467ad4c761816c59cb0b6baced621dd58043ebab3f405bc4b8aa546a89a1d88430db16418a0f35019c642a23d5521ff4233957f2f7cb469d94ea1922947cb13da09f514e2d0c6b56d"}, {0xc, 0x7, {0x1}}, {0xc, 0x8, {0x3}}}}, @m_skbedit={0x6c, 0x1, 0x0, 0x0, {{0xc}, {0x3c, 0x2, 0x0, 0x1, [@TCA_SKBEDIT_PARMS={0x18, 0x2, {0x4, 0x5, 0x2, 0x80000000, 0x1}}, @TCA_SKBEDIT_PARMS={0x18, 0x2, {0x1, 0x5, 0x2, 0x3, 0x80000001}}, @TCA_SKBEDIT_MARK={0x8, 0x5, 0x6}]}, {0x7, 0x6, "e597cb"}, {0xc, 0x7, {0x1, 0x1}}, {0xc, 0x8, {0x1, 0x3}}}}]}, {0x12b8, 0x1, [@m_nat={0xd0, 0xa, 0x0, 0x0, {{0x8}, {0x7c, 0x2, 0x0, 0x1, [@TCA_NAT_PARMS={0x28, 0x1, {{0xffff, 0xfffffffd, 0x4, 0x5, 0x5}, @rand_addr=0x64010100, @broadcast, 
0xffffff00, 0x1}}, @TCA_NAT_PARMS={0x28, 0x1, {{0x8, 0x1, 0x7, 0x6, 0x80000001}, @multicast2, @remote, 0xff000000}}, @TCA_NAT_PARMS={0x28, 0x1, {{0xfffffcc4, 0x8, 0x4, 0x4a, 0x7ff}, @local, @dev={0xac, 0x14, 0x14, 0x2a}, 0xffffffff}}]}, {0x2d, 0x6, "bc6b8bf5aab2e78a7ad21549aaaa47e86907c349180305192f884edef76ddaef91fb29959039f9d932"}, {0xc, 0x7, {0x1}}, {0xc, 0x8, {0x3, 0x4}}}}, @m_bpf={0x1090, 0x12, 0x0, 0x0, {{0x8}, {0x68, 0x2, 0x0, 0x1, [@TCA_ACT_BPF_OPS={0x4c, 0x4, [{0x7fff, 0x7f, 0x6, 0x7fffffff}, {0x7016, 0x50, 0x5, 0x7}, {0x20, 0x7, 0x6, 0x3}, {0x0, 0xf7, 0x40, 0x7}, {0x3, 0x24, 0x9, 0x9}, {0x2, 0x7f, 0x2f, 0x3f}, {0xa77d, 0x40, 0x3f, 0x9}, {0x8, 0x80, 0x3f, 0x1992}, {0x8ae1, 0x3, 0x39, 0x4f9}]}, @TCA_ACT_BPF_PARMS={0x18, 0x2, {0x7e0, 0x9, 0x6, 0x5, 0x7fffffff}}]}, {0x1004, 0x6, "d5c397cd6a05dfb9ee416af74a320149463645396597a2b5cd06857b94e64883c377191a7447f76206cbf62b17395679d6455a77d466b611090105e10df94939e349296f8b25ecdd8b37b4e38dfd6c40af43b5675aca6aebfcdad96640c38eecea90c6f401e9b1ea907b505fca515af7b9dcc62128b5f5a2ee0b923e73becb7b974cd10d12ac9313cfd146b4eef21c3c01dd074411543a973f53954abca2895e8d560147b79b9046a925bdd3d40ef8226dab1b40ea5c9e9988374e04011a06b012c41a426c2977702f6a3d8dfc2afa1fdda03e73f2622958151834764b2f447f73b9fe97f131a96d248594719da44dd61c2fb5a7f627c7042ff2de0cd46a0a8b15616c560f13cb83403735eb433c5e17b8d8c8b1fdf254beb05b1b5988465b96da802f1f6b32346ef1a83ba4ac4088166eed1a6dc19f4f069cfa1845641c9da70f7a684b46b6a8c4eaf1888220bf7a6a830d2deff703e71afa1e44903393834c75c20a296d822f70cbcb20d4683789eef42cbddef50b652ec5a24645b35e8fd5b9cda2b98f85bd786fececa8d18b3cd7d74431efaf42c08a0f98b0f1ea5ccd990f11b81a61a8cc0173b6bcf2449cb7530b3a8faceefb5161057b29db366cfb1b02c0bcf834b8ee7c3d5a22e137c1ad7d17261f40bcf15565b27c864d1f388c264c89c1e5f4c0e70f396afcb190c57c8a83e42d6253bb339feefb786696b7679c0846c55c86097daaa948a9cc496be08a95a2e51a96f8c4efd2195102f8f020d2b56648309aba5b2494966f0b096961412506c02616fabc1b523e6e5138d0629ddaa0ab3af3b3ded3c27f1317c4fb864c2dda0fa800814b36b1edcbbd1dee5f502d1bdb98f4d7bcd65b65158fc235ddc982bbfb70c10f38fc765146b4bab0ba4d650d463125ec6d6cfabbce5e7e0ad7661b1344771f4696c67a75cdf9a11bc4202100b3bd149b1f4cc1c47b3eef569d7cdcc57913e5cc93c9dc4d1cfcb5d288a1e3d22f9095a0e5e1dc56daf42d9163b5f7c8e1e5ae295add87d098959f033d44cae83332ed4f042536f930753f3b758e0454cdd23a2e582763cfb5b95319230ae0842ce4fde301dcc5acce4d748fd9d5472486b0b925a305e2f74caae64f1cf34fafe551430e68f45a10a64e2cf30ffd72f3da3a92c412703dc90dba3a4ee9d5fa249eb258c068e0a0e6af0bf686a67ecd9048a262bbb70ec5a08be882ce869bae99311ebccb048b6de6d80efa7c061728f6ccd88b48956d3176be9ed51eccb849b2da66a2f57e29a5b2f5f2f543c7283b807b08d075f309671301337a1bb2e6e0845f8b2dc80a81b702f7eeb81c85482e4ef0abc0fcb61a6a0554afd272a3133fe6efe695143c5f94fc2185333fc17dcda0b1249cfbceaa84229c8e52bd671c829982716a6ba2cac973786a20dcdb1489fcd7e375aeda14bee7ea1ad20dd485a0bc6e2f7b0730ccdd1fc1f9560a6d2f446f311adf4e440757ef8805b970aadb96041dba69a7a1e16004d9846c94cd33196ff0a14299731b70e15dc686b91a5c8733d9cefb7ad44cdb397a9e7fb7d86db92786f16d8b6056dc194d46c25d23afc493edceace06560576f6cd45462591c3e93b4c71680bbd6a5bee9d1ff1a4ebb7e71266793e66d2a03a182c808375ff1be0ad8f0615e11d06713042ec257e0a2c9e6bb6fbbaf48b5887a2ac52e0068dfc1ef2d5f0c79febb6e83c014dc176cb36409e7e4837cd2117f7f9517e4af3c26ce3a56b0f6cd10b3b1dab1dc35ee8c268b14d2d7110968c96653801a992a238230c611a1b700311c3f976c1a85e5fcfc153db9ec7f95fa9bd83a25d971708ab14a19cf53fb0ed00c19fa1b77ccdeab51027300c7bac430215e32e3b5961d26a1a477e9cbe4c38428b1f89a7987a9d9971c4da1def147beaa14a0579d01fc471eb0550513fd53a1371b90f3cdb7a3c1b8f2e60c
84d56f672502ab66c2d69c326bd34b44a6f450e64858a41953247056a8cc23e4f7d67f7515e64c0c16c40e18c0ae58abb92ba557aaf3115bbd16f10c5a165cbd31b1c93fb2b7171efab569d6ad3211891d2a73bd0e551363ad93b59c1679f20ee92b10056ca24792ba91e22f95a931e2722d19e904883e1e6a35b16786d8a9a1f8a574f7b666a6cda61ffd17c7a7061da34d0412cfdc6786fb354b1004068f523e6ac282e91f368525370a2e2b4b05e879e935b064a12de9f84c1583c99839ff6567386e64093a7f4fb68d5b79f2b4979cc2896fb544fac13673fd39a83b27e95337868d4e9b2a62ba5662be6293f430178ccb5b7c92ffbb2aaad681e7db1e85811a2e207e56dc8949d725d206282300ebd31a77bf158bb020328178c86ff005ad4e9531464c322886e8f1a47017bdc42790499ae5f7431293cfde6ba0f48826a73300f132e46dafdb74ef5fc6829c4e2abc111f1bcb055ee157d9189b4120e035fe90d3157dc2e8b710ecd1daa92dba256ae6e7aa2b8b1991d5df9dad07d09875599a43eda50b0726a195a32690b19a0257b7455fc96ad590fe009fe0ae6275d95295ba9b4a072cb5fb9e2ad2f433ef5fb7687ef16ddf4b613da4be923432ea6cf6bb472fe43644cea7893b5203efeaf63278ec1f3fba16b53e42b4e68ec48ba8a62a53d508c5145a6097806a6f4cc9a15469a3cde50844e8a0525ed3a04de0afad876733b6b8d82d9a3488cc01ab72808feb3a3979725355288376d57929eabbb0043bd6f1f841473d59221d4059bcea70d3bde23fe6abaf30c189c0bdb99bb56e4ff4af854ee1149017fd1bf5fa9b24fbd4370cc2d73b677f36a751ac8e21239a243622c732c7357e8a44ece685c6d585e4d82cb997160c2766949dfd8e2590a4dffccef738a2dda0eda04c06b389c41ab04536fe00c81d18adf0b9834ee24bcb9669fd77e8648bf9d5add2f237809f4c10a410afadb4bf6a98b4a4c5021d1a6fbcd52039ff9adc446c3a67c317a41785dfbfcf6e02a17a7f0f0f14fba4e8906a292e52c60c6136b2550a5bedab67d9adb9fa2f3c8fc04aa5719b87fac18b3363d753c3046a2e7bf2ce7ea9d989c6c9e55deb0861a0b025421efe20060110d9b9fe4f4473f8c20ae283fd688911c6ad451fb980d0b22eb44577015e452aadf471fe21590da1a10111ee241db4eed02881cacb87ae40788beb11ffd83a7b7bb6521b538ee94b20128c513cd074ad79f43d11e34cad7752fab6f653b4ddec32156ab72a255ee11efffdccd6ab5bc2b5b6da3fc4f31209cb71bc0b8d94d52d5abdc40ce329a4aedc3d0d77747255d1789f5ceab23402d5c9bc641270eef2d36d7f30202521d39764c5ac3c725ed17f278700576700ac52906fbce21a41385c8cfeb2c8cc65da752842102236a0c695a63f47b088f7bd9c9c60365136b8be6384b96e675444376844bce8d0438fbbe921429ce1c8c0a34fcb6b20b161b3dbc3f5bc852c8dcf3fa56e58ae7e70010ff0f74251a61e8eff8aad004d976a8b2c15ba6cd898f5c245e50c8f4bc5e170ab0ecb015e14cb06625189425470c2c56a959e7eafe8fe823f876d13c073dd0183acfb0ba18d54cc013da148960227b6f95f682af879e7d544eebe8f53d914940d9ad6a2e8289a690b0a914ec0878c9465f7782405cfee4a42f31e5c89ba27838f71515692fc18a500df155b74b4c8200077f513c3930d7966c90b4747a8fe5774b9a8f363084cdeb9e9a48c2b40da6fecf610cb66fd255e384e2f315d5d2f20f4c120a7aef7ffcb66a39340f000dbb9eea8e071d6b0adffe022279535e42329f03f03d0ca127804f1eb28af549d740891d80490ff710201c03d2c0fd75a1824ca80db7a9c45fcfbd1e020ee68be926ca6f780626c80f9e49b60c05f5557f2583794ee6825d2c831520530b200e74ddc3236422fd8b1d8cf7090cd7c10d21a9b4e315e83dc9bf15f23f0acb032be93e3029485b509f1d1c8c4c34784cc936ab1c8bf0dae4ab2f9b88773a30306543759bfba8e47a10981a70bad0ca33ce5a44183351ff9efd1569c8abb7e096d899361fe0a27a471e58a01f120f2a1adb0853dcd7a6b3224a7dfbbf1b9c3c13e9b5cdc43a6ce13aa8e2eabc0d7e9a25802ec3532a2adeb0b2af0386804c0ec3cebf1044d4e7fe4b39120a37942e0a0b76472318b67483d5c89e5afd5a9df8ba8a5d216fd5dbe33f1ef344b9c56b1d4ebc6fb4688b96005e86772af5350ace9f2f19b5180577e8e4369aad771fa2330109e5e58024d37c712f577b68037c1f6b18c10b1bbbb4e5dd07a16ff9a97c21983437709c65379de11a1727b1f3cfbed2d7286f55e43ede4b0696a310491485b2ea2bfa3d4f9d6d75b2780d05372c3941b371f97c6822ce5473f753e9e9402eec9e8c5e2678bbced5c6c10e3f995ef6013feaa0ed621b4cfe2ed549623f8ce3e7d4b8a160efa15c931c6b28c3e926988a4d4677f9d36a464ac0c7c24aab048169a52306a761a49b73
b44f83ab65126aa29e597512caac7b2baf974322b87604aa6636e81759fa411e0cc3721828f9d5455808573a8807a0bd06a034c79e1f12734a6fd47cfa650213c3dcdee3540e3dc15c536e83d9984c6020ec6aafe8382094621128f13a1ff2d854234107a043a1431ec584c01e3bf0d78bff558d9a8cdf6dad5b500f0be8c1870487e08ebc34616b6f84c09e2cae4795014a232b84c4cd247469650763abe8a29ac653c122580b119c10f669a545b4357540c1b3114d219ec0d8f562bb5eb51b70f71c744ce4703d617c4f818a9ba959f353c8174c61200332d9af94035a5906ad34c7786d0a900fd102f8cab93b01fbb038f774f8616351c0e862f3eaae5ef17a5c503974db34f6bd2abef240562eac1bcec04e7c2f4628fcd271a874ca4b6831196ffedaa91ced339c46d8b9d4316f66bcd284274314b05dabd53ed14b39eb3af26f4053ca22fc3918340ed9eb9fbb06ae98ec61ee23cdf01fa688bea848ff1f3462faebf6fbef438255fde9286b2bea6e6ae525aa5f279a31ee3b349201c975d6fcb01b833759cadebb35eaef17d07a5931b5e9c9731ddb23ed5c242cfcde0c2c4f68364353ec4e29817a8f5c801f3d383d7373555914f771a11b3d11b6e38cf0b8c53f20a84a6fcbf4a127912bbcccd0f0c5e07e8ca464d208b4c9288cc3611b05bbd6fb44072b923f83d2d1955fad5055109cf2bd8a3bcb30386816b0b473cd4e2e9131a316d3ba64cb26b8f2c2f8ce463989a3c7d3831b4137be1f2bf57d1a0e599d72208613adcfd3c3b1c86a4d7a7baf8bce90e195d50d368a16675dda9f70cbbabfdd5206d595a8a92795fab2fd3d0a54e533d82f3fbf95b3655f440a8a49fdfecc80c2176532713539c3f89652571b60f09162163b99afe83a3677ef6c8683ef0affa72fe16c7632d55f0869aa9e8acf3df681c99f4bb59afd02dbbd593c95b4179adf6149b7c6e4708d2dba66dc279056e7b5fd672abb9a7224e4428e4fddc714a867da8c41cf572df942d2d27e43420e9e5595cfdaf089e9283034ad5facfa41d9ec293560255300bd9e1d84468a70942fc87090a9a33fbb843026012177789814499b3234e58e2c5c360535806f7bc2d063ffc92d1029ebd463ba0542931df9e1294fd29cdbd367691f4935215abacd75635fc331c33e1cc875af14f2e8aa2bde61e49eafb6618c122786c204792d2e11ae4a69d98046f82900570336b1e599ebf102de320febd2117b12cac8c61d34f58d94890db73b70a20501f65131c47dddd50e6d1453741999ad24c1e0add6f3cb5d42f5204db16cc08a0ac8faf244f23a3f99fdae2"}, {0xc, 0x7, {0x1, 0x1}}, {0xc, 0x8, {0x1, 0x3}}}}, @m_ipt={0x154, 0xa, 0x0, 0x0, {{0x8}, {0x30, 0x2, 0x0, 0x1, [@TCA_IPT_HOOK={0x8, 0x2, 0x1}, @TCA_IPT_TABLE={0x24, 0x1, 'filter\x00'}]}, {0xfe, 0x6, "1c99289766349766d6fb510168184525fdeb9ca5cfc8cddd4807de1f21b28ffbfdab168537494ccc1ad0bcb4c5d61f7450fd926be0abf90240d303fe47a44b155989144aacb23019a9e560b58875604502aca3a4ee10845fed4a1059f4704a4c37c65c5f4358feb67a61a3d24d717dd224085e4d4f095aad044e568af35003949bb11de9d3cc065ad6d6f0c5415f01f17c78e85211466384dcb192b646cd178f3eaef8f8da5ed00555f73aa1b939e79eb77743ca59dab6da445c222e8adc9654767838747d34fd788da596f4993519af07eb283c444346af4a9fce0218907a0332605feb938b2b8b00e5c66d1f34d3e1a6d8d7bd931527087bd7"}, {0xc, 0x7, {0x1}}, {0xc}}}]}, {0x504, 0x1, [@m_csum={0x130, 0x7, 0x0, 0x0, {{0x9}, {0x100, 0x2, 0x0, 0x1, [@TCA_CSUM_PARMS={0x1c, 0x1, {{0x0, 0x7fffffff, 0xffffffffffffffff, 0x80000001, 0x3}, 0x3a}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x4, 0x3, 0xffffffffffffffff, 0x400, 0x6}, 0x28}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0xfffffffd, 0x2, 0xffffffffffffffff, 0x7, 0x408}, 0x20}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x80, 0x1ab, 0x4, 0x1, 0xd78}, 0x2f}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x3, 0x2, 0x5, 0x7fff, 0x5}, 0x17}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x5fd5, 0x1000000, 0x4, 0x7, 0x43}, 0x2d}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x8, 0x2, 0x1, 0x0, 0x7fffffff}, 0x68}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x2, 0x8, 0x0, 0x7ff, 0x7f}, 0x4a}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x4fc7, 0x2, 0x701d22f88279bd7c, 0xd811, 0xfdf0}, 0x76}}]}, {0x7, 0x6, "a457bb"}, {0xc}, {0xc, 0x8, {0x0, 0x1}}}}, @m_skbmod={0x17c, 0xf, 0x0, 0x0, {{0xb}, {0x5c, 0x2, 0x0, 0x1, [@TCA_SKBMOD_ETYPE={0x6, 0x5, 0x6}, 
@TCA_SKBMOD_ETYPE={0x6, 0x5, 0x400}, @TCA_SKBMOD_ETYPE={0x6}, @TCA_SKBMOD_ETYPE={0x6, 0x5, 0x4800}, @TCA_SKBMOD_SMAC={0xa, 0x4, @link_local={0x1, 0x80, 0xc2, 0x0, 0x0, 0xe}}, @TCA_SKBMOD_ETYPE={0x6, 0x5, 0x817d}, @TCA_SKBMOD_PARMS={0x24, 0x2, {{0x5, 0x1, 0x7, 0x1, 0xfffffffe}, 0xc}}]}, {0xf7, 0x6, "469adb2e2cf41b85321c7b5a23cfb528ffa3f286d09af62de4c0437ed17e4bc9c0df7e91f1dd947ceafd1b3fd4482281e6b6beeb2e830983a1d8fcc1165bf74a9337c5d61d8de7fc97a7a88ff350763277657a4d4974eabf8c6afb00c159e410f71cbf97f49ef1d6ec8595a0c901694f158703dea8683b7337e77c54fc95ecf6fa59f7ecb82e4bfa7de9c4f19d7d17cc0aac8b5f4f27346cac83a14371745ada93c9389893e54022226ce33f522e2f30a08bb8f1e2d086a3a41dda677a7b90d8c10284d939365e627baa0f229165b37855d9075ec9a3215620d0e26e9c81a5168e03546833ea5e89fb0ed7d07a34f44df43233"}, {0xc, 0x7, {0x0, 0x1}}, {0xc, 0x8, {0x3, 0x3}}}}, @m_xt={0x254, 0x1, 0x0, 0x0, {{0x7}, {0x188, 0x2, 0x0, 0x1, [@TCA_IPT_HOOK={0x8, 0x2, 0x2}, @TCA_IPT_HOOK={0x8, 0x2, 0x3}, @TCA_IPT_TARG={0xc1, 0x6, {0x1ff, 'nat\x00', 0x1, 0x7c, "a36265120b7e186ca73fc759df8344fc2da4c0e16638b260c37046bda260fe3105d6fa10b57dae25e049be42891968806e38241feb33b5ba929cfc8955b9b9e60bd0826593b77951e9dccf0fb6d938c12d6868dbc6ab182fd31f0694b458c7ce30ce63bcb51c3d90c82cd09197ab7fcb15fe2d30fd649858f9435da9dae330e353d73cdbb61e05ffbffd05c881b3e94dd4e1ed0dd0fce0"}}, @TCA_IPT_TARG={0x65, 0x6, {0x6, 'mangle\x00', 0x1, 0x2, "500437c4c1b7f744d5efe01b98bae1391531f0a2d3e85606b333f4bb365f02efecf2709598cd6ec282a516b504c7da9e4d30e4cf60aa9b9af4d7da"}}, @TCA_IPT_TABLE={0x24, 0x1, 'security\x00'}, @TCA_IPT_TABLE={0x24, 0x1, 'security\x00'}]}, {0xa6, 0x6, "e1d0c5568381ce0be90c8f727e47e1c9f19320379a4aabc4987eea4427f9729077f4ef598275cbcdc89aedc6277c313141bc107d81df8f22e26a77fab298edf9296c402840d753aff241a7c1c33077d1ee94471d8d7f900399b87efd3b6008453a5202861c54850aa152286831a718f02d04168bd0185fc3cebf3aa93bb1dd5ac99520a71faf5e747d3aa068e5e1ec548e3054a433da1c9c822337599fe2f79c3c15"}, {0xc, 0x7, {0x1}}, {0xc, 0x8, {0x1, 0x2}}}}]}, {0x16b4, 0x1, [@m_connmark={0xe8, 0xc, 0x0, 0x0, {{0xd}, {0x20, 0x2, 0x0, 0x1, [@TCA_CONNMARK_PARMS={0x1c, 0x1, {{0x4, 0x80000001, 0x5, 0x1, 0x4}, 0x8001}}]}, {0x99, 0x6, "03aeab77c6b6512230bfb4a4ac9b43e3dcf0d450876d33959dc776e07a78196e3269bac6d8589a252afcf9539f70af09178c9c32b5d5968d9a4b0573f10b524b8e4cd19caf58fd02402025b05fd8e46fcd1c94c85b0dc75fe271aad7cfb111e47a70f5dfffdf94b6992422f368abc3fd15a707470c933185cb19812aca83b9611a926a8937eba6778a011e35bddf74b85861922719"}, {0xc, 0x7, {0x1}}, {0xc, 0x8, {0x1, 0x4}}}}, @m_ipt={0x23c, 0x1e, 0x0, 0x0, {{0x8}, {0x174, 0x2, 0x0, 0x1, [@TCA_IPT_TARG={0x10f, 0x6, {0x8, 'mangle\x00', 0x81, 0x3963, "fdd7875d02135264942e2e13ceb04c4abfc1c38223b7750262be842e76f650d9a8d6814ee8c83b01075659f5806c9bf17132c3f7898e32c08265853f9a5f37802c4d62eea01ac9c7458c96c094c6c161f3541fcc90630c5422c67169acb00e8bff2a268bde93a0dd5589a7f9cf35f5839961cc5cbf769820644ae0b07d94e45f23902b5b3ae7731f162f0144e8bb7153f0ebf787387e3cfcecb4fb96e749c5b849e0dd4bc4ac4647692857192301a035d92651455bbb9359345f10634e10d8405ee8049471621d9735138ded1c7fa59de5f150da3606266fd3e8fe91f90e032e3f0eaceb73"}}, @TCA_IPT_TABLE={0x24, 0x1, 'nat\x00'}, @TCA_IPT_INDEX={0x8, 0x3, 0xfffffffe}, @TCA_IPT_INDEX={0x8}, @TCA_IPT_TABLE={0x24, 0x1, 'raw\x00'}, @TCA_IPT_INDEX={0x8, 0x3, 0x400}]}, {0xa1, 0x6, 
"ba7efa3f282338c36e76b0cac6a7041260e6223343731221c4739f4de0c089b36caded50142fdde413819c12a936da64fc0c8d9cd8ecbc9a1a501aa5a6f1038a90793b3c2c440c0b86651fd1b5864446dca51e15d0fd81734bbf6bf6b9322003d8d102a54af20085485c9d7bd138667551a0ad46e3ccb501125791f70dfece56fae62212400d45412bfa98882c215c077793b9ed54d28986000009d854"}, {0xc, 0x7, {0x1, 0x1}}, {0xc, 0x8, {0x1, 0x2}}}}, @m_mpls={0xf8, 0x14, 0x0, 0x0, {{0x9}, {0x34, 0x2, 0x0, 0x1, [@TCA_MPLS_BOS={0x5, 0x8, 0x1}, @TCA_MPLS_TC={0x5, 0x6, 0x5}, @TCA_MPLS_LABEL={0x8, 0x5, 0xb315}, @TCA_MPLS_LABEL={0x8, 0x5, 0x30021}, @TCA_MPLS_LABEL={0x8, 0x5, 0x8e9ea}, @TCA_MPLS_LABEL={0x8, 0x5, 0xccd5b}]}, {0x99, 0x6, "d229cad884cae9affc3174e07f4c784fca055c4bf8d0398be5a871d4e5365bafbe988159fd8ae79523f2338ba4c88ae8938d8da1c1723a1e54156c1b754807c8005c931732bb05a2c2b5bccf6109a252c0993da752485a7bc9c8396c426b2a6dda2fdf0766313b40acb6c878394c31449a01ebda859adb01c0c15ae91f757cd307f7c6417699fbefacb1186354412a630b4387515a"}, {0xc, 0x7, {0x1}}, {0xc, 0x8, {0x2, 0x3}}}}, @m_xt={0x184, 0x2, 0x0, 0x0, {{0x7}, {0x12c, 0x2, 0x0, 0x1, [@TCA_IPT_HOOK={0x8, 0x2, 0x2}, @TCA_IPT_TABLE={0x24, 0x1, 'mangle\x00'}, @TCA_IPT_TARG={0xb1, 0x6, {0x1ff, 'mangle\x00', 0x7f, 0x9, "9e56e3ac8ddcda53596e90ecc48c78d65eddcdd5ace92ae5e2b159c3087371890895391f0002ae841d5a894346f1f3ecedfb1ff4a73509e7ff0617ca4f0f55c9ba55022f2cafb884ca418208e2d2b6b0da84e1b1cf1fdc132088a8eeeb33a00729548079410f967c5915754f3f11ad6d85365cdc17dc726f47a66736fb7708ccc3004c8c5b525c"}}, @TCA_IPT_TABLE={0x24, 0x1, 'security\x00'}, @TCA_IPT_TABLE={0x24, 0x1, 'security\x00'}]}, {0x32, 0x6, "655222f21dc8c3c3eae5ba5ba36a4a84581afb66b18d7a51e0154c419483f86ce93812b0483f728d2fb8f059455d"}, {0xc, 0x7, {0x1, 0x1}}, {0xc, 0x8, {0x3}}}}, @m_csum={0x1110, 0x5, 0x0, 0x0, {{0x9}, {0xe4, 0x2, 0x0, 0x1, [@TCA_CSUM_PARMS={0x1c, 0x1, {{0x0, 0x6, 0xffffffffffffffff, 0x100, 0xc1}, 0x1000}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x7, 0xffff, 0xffffffffffffffff, 0xff, 0xce}, 0x70}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x7, 0xd3, 0x3, 0x7, 0x9}, 0x61}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x6, 0x5, 0x2, 0x4, 0x3}, 0x13}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x67e7077e, 0x2, 0x7, 0x795, 0x7ff}, 0x4b}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x8, 0x2, 0x6, 0xdfe, 0x7fffffff}, 0x1c}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x51f, 0x9, 0x7, 0x2, 0x92}, 0x3b}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x7, 0x0, 0x1, 0x0, 0xb4}, 0x7}}]}, {0x1004, 0x6, 
"a4f6744c97e0549953a6b9668c2ee5bdbf8f6d8eb58f0b6f506dfd7f25f826d6c616ee0326543d40357ca355c89495d320bb884bf4e3a5036ca4301aba26ee893e14bb0a33047f593a0e715215ab31488849a15db369778d71b15dbd6da53524468c5a59dbc31f3714a742050e8e42eebc527384d37f9333a1cd74d02419861f8d5f4fff43cdb70694cde1c976e9ec4312de161f8872bb87ea486ccacd528b495c13a6f054cbb703dc38594629fee566736739aff2bd5d82e7c01d3bd4e8f1e5c6c68baf7d585d0da2aaccf3a77e87ee8f7b01e078860b3f562dc4b16c19a69a01b2b23faca1d1418ddb5e029faf004d2e08ecb26f2f569853214465a6129be99d0dbaecac718a38aa6e0a4f026f8dfcf3df96d356369abc2b3c0d1465757d3a9ee20fab22069f4a846990c4c8779a209c9c857c97b367448323d0ac3ed69cbca5ebb86799865420ebe3e0828cf3daeee8fed764d0bf1466fa87c008ac4c0157493de93febf8c6991ca2f9242b2ee5adbd660621b940a5426cd983120e69d79f993b71aa40ac0655b262c5924b3cce92445f72537cee84a81c5d9f4609ca8649e94f9dd8610dfc6cdf90ebc2f09ccf5c4eb3bf3c60d35b94045e3e05b6610b5e551689e923494b0a6f0ff64b8e536e26e0458f1757ed71d1d9f6d176ce3c23e0cf52275cbf8d010c04b9c41bc4ada9f73fa57b22a452c38ed7f659de11d8f281f5bdb9887e7a36e84f3cd33a39c4159e8b67d0b7a64fc8b0ac831922256384b61dbf0adb4b13955a97a4967673c003a2bc269d573707762e6d12d1e81ff3b1cc7242c1072a979353661ca4a687fd43e16983a6eb30b298427770c8c337c0e87ee45d4b6f442558670db3ed4d379109c1e6b7cb9ecff66816061a221f1c5104abc4ac6a2c76f680dbbf102f4ba7a33df85b2187658ee5af73f9e37d7fb93ac50da6ff6aa558fb48beb68eafb5f23a120e938aad5042d99b6d1d3c4e586825c115e9a90c9aff7f0b6f3c46634c93f84fc8b1c89a5c69b77cedd8e3e01a641472fc863045d3e50ef3059ebbc04760bb7f35bee571afcf7a14ec547184916d4ff6050be595d0ee5b569fc7313d83dcf73e65e92f6dac45dbfd1da86c78a7232aaadb6416164e10045c6b5a7977c0a1ac5213311f9f82625652eb875c55028f1a5b05556e6649f809490309cc1571972040e0d45ff53137802fe774ab814bb823e6e0ef236f535cbbea42b082bfca32c5bcdc5e7b30a2a5dee183a31ac852c2b4cba8a3f562eea46f40ec7f97deeb97b671a148492801e0dd3b04ff8fbc6d22435fe2e3148920ece2069b56da6509739328ad0b7e633a646c62424576965b01f4925b4c7cca98480872b81017617a874dd37f7cefcbc6dba5c27748bf5971b66b98e9032273db086f0c2e4f9d752c5633a6b75bf983c1c169b710633e02c911e60468855bbed3bdcce6f37d4033752e3cac45b9ca22f6a449ef5afe0536f3e4e855937c721891f591e66c2d9fb807fb91518b20f42e6e3b81d535612bf046fdc48c2165597b5e0f636357a54b771aaf38119aaad0900a9cbb9721fa63aa274989a1369e8ee873ab6d687f2447e5ba10cec17247bfdb15a2256b97c6caa5e52eb77bda9b68577bb53a3879283a913b13c65788ce414d9af61a9daeff517ffede4882e4e30a5153997431a121a436436a6525c8668affb9d284257d0b1e9d9dca5c120d0b57eda860bdd370f533c3ceed0ab57812181a9a8eb11f79d233367dcb8cf9982be61c41bd5c808287780b0015f463f40a8a8801c0b8e30511cd1d2f661c414db7c59edcbd09286d9e13f3f40424f598f332ab01ac583a0430cb989c9bf05195eec6652175afb92ba16152caec34b0075c16cf8248717ebf4f0db91c1c6d8ed8bb65c7147d2f8774f1227351027cf94a07d7094bd92450b7c82adde469a63e1c8040a07e66f35b5f5237e7871e87ef8a650b3a2e4fa88a11ec9cef08e5baa926885e0dec1afe90c6103e8a469181cbadf93aebb9d20726c0d57e3d21556ecbacf6348b45a43f1da77b14c2171e1f285744d83b88d3c85ba54ca3efef66514514ecd5dd5c75bcc7d500cbe9d062863870883cf992864141778bbd868683b8b2596e7507bf09cf9e9e10b11ecd333d2d42e254e8fc1ff860801143e52d4475d6460994ab74e4c75339df77d691c28fca4ccb1b000348204c6ee4356ef5e9977f4214bdcaf40a1f104a1b914520ea6e70ce85ba8c40c6382702ebfbd30835851db429d0f1d8f2549fce5cc539104fc225e9d13f27897e3440ebb2f3ec843807e57ea6c6aac617c017cdf8fc9f0b2016ebf8e6a23645c8564e5964892acc6f731b6bfd7e9c2fbf3614b7b0a4a4ea9a3745ca1bb7cc496ef034f2f20185870b1ea330077599fb1d355fcb0b4c98ed5a6609767d873f28013c61180b263b0ca0bf55c6e52c4ce7f63c4880ed325dffbd28c38dd4c8eb7f82e0548d9fa5b01787de60d3d9e0b8aecdbfbb69b21cf9994cda40819b
bb10fdd9a77d9df090affc15b622114c33a88171509a822447874f93693d3cd0094e7ef909455f577d14a0edd49cd5a3b8c360c11b9041a1a8035bff9f10925a2f5759a62998e27e510e11ace3079cb4a2fb4a9c47baaf7af7fbe9fc987a283eb2d1b290d55ab19b7af4a02c9010ddaf701be592a7a1defea6e8f1a82d9fd8b152c7561e51c4489990993afc72acdabe4bc4acf48182efbbb6541700207b7882cc00e4a03df564c569c0c058290f012fe2cc4978f5642514655287040469aa0810c0731f6ded27f6e931029398838ab61e06a51b5c9bb44ff2ad5a94904abad2205a9b8f814d341cdd3b43b9664e4cece2b4057e12a664cb2aec4298528b5a0ec5998ab5f37f90e7ef3d4f08188737b62601b43df74ef8b2b3a7cd1b3e0e66ea88bc7b4acdf6e7251336eea1d97d2a12ae65c33bc0df452a404d7627264425119d4b4aa202931bff272e8614b2f87e0861ee612844c8a9949c1f424fda25e9f19c9f1b42fdc8ccf35f25405ed3df83bdea27b67ae40e3a28710bb72ff7dddbf8af746305de1586146c9bd1882870b3967b47d1752f9ce0d9dfb183a86c4032944b8c8be51c836deb8ebf899c1ed4a44873a5f239400b5229ff7a10827e12e041576b4b7eb9e1171900e98a8ee9831071b2d6d833373be52495f452197e90692ded63cffb6ab5ff3e7abe1dceb0fb47386e4f1ad2a439fdd8c0c7d67bff306a04b92b22f6b205986550b2f7603b8393cfa8c0b7c015bf8bdd80dc97b5118f04bff54f03f19f105a1eeebf478d7005f493366fb2b8924de41a8313f416646652f2fe2ee7a38f4d7eebda58d55488f91e05c94b18095d6b4538f9c17b615434943a185fc808a6e1b839439935f971eb721c991d106cdd7fec147aafb8a99c10b08c0b6921bfbf4466b73c12f98bcccc8194bb10357f9520b62e17c89b7198bbcae3edd5edfb0e121a32e8f15d66b008f4d48f9528d29fcbfe65e678bb22164713571a2635c893f1a7cd1ea0a7a1935bc2505c3cfef22097dfbe01f192b82dc134623809f529f87fd576bdcff1c4c5584fb5f19958507d5970b69291bf3d49287d143853f057c708fe67998020ff8a01604cd003a4185ad819fdb7d8b7fccae68ca3901be025ad3cb5740f836f30399c4623c69a6b5f8c535f4ccc4854d8d72bd814eed2ff3305c66ac155c6cd5a65b05334a704acfa4af419a9d71cf028e0e208e39f54010c89ee53357648c994e88ceabb3ccd9dc0579f936ce011b57308d8d5fbd91567de62c4a4004b68509127c6225ef2cfa4e19e056ea605b3391fd0f1de1edf05b23713b120dd5d9edc4e6a17c32cd24ffd31c3b7a6b7512b40378c99f99173abd8269b6cbe5d8a26d68f029a04fd7fd0a01ad6ce1a19d8d5cf9e7f6e5372c4c041dd07a6ab4c59d37abb28e157b0a90a7f24c6d859b42740785e04f7ec9a417d7e74d4eb44ff751c2edd12ee70f1b92b8d30631a31fee1aa039e126b42b7b81b815df972f11ae74266a080da573e7a684f186093614a69619ceec1a3c933869595d5158b91d90bea1604d31f63466408a4a36c029e2b892c41cd8bcfb40c5fc9fa5fffe3596a5ccf7d2cac73b1b1a4561416cc15cbd4110dfedb23ed4bdf0a3fbe3cd1b1be812b289e2679fc6b0c6b8ccdfa2b47de27133f7fb4f879b9208cda73332f1df049feda08c119eef7a86af80f10a259e9199ed9003182fe52dc8c9b57a4137cf0e9508625b123ffb693ae8417b5a58780390d42d0729fb2261066a090605e3c85572bd2609c6b01030f82f940d7fdbed5c314f5441276f2c4458596058d36e10a41a585a72866f9cb7c306a4f1e40d3502b77303e442525458a18eff321220be7509e884c97f9d4bc9ff4b60da1f5d930f30c79fa9462b43c90486f924797350a1e1a5b2562471058bbcddfe6132f8641236bca285f2c51339c9a54606cac6b6a38aca92bccceeb1c851004e2553819e0114313cc4813381741cd857bc253818adb58813e37776a11117a769f4a9030fd8ac980007551511159df6ac0d2c0950834eee199e4654cad602539d5bd2749c7c866ef02e8a421d296fc6b0498fef68e6061686ee9be6cb98ea92a5cae43a6e8d1fa05fa2d89f8fa8fa81662797a4988cca25dd4f739c21019df0b822f65cde38a0a65f1318e491fe215353153881d9b3262f143af2a391f979583c8c945dc985b6de5bc34c1608a3f270890eb8aff31da524ebf40de2a22c55ab90acaef3c24c0256e27cb064d46815f36941548a1258ac786352275ba65e69ac794581ff1ea325f93b92a5934ab9a384c54bbfe7485b74099a83a4d9d43edc82784c387dd694ce3b0473e3710a3d4a19162dbc72a34c853a9b44c226a7691aa1c9ddb56b4ccf43cf570ffda419d98a7104bb4d6210a0875e084d494b4987562840e2dd55fc091602fae24b41a580d2a1f99d4a081b42398e41cae5c55d376ec1181d1aecacba06e5f4392d6094cc4ce68662d5f012814f9982b732af505661c7
5108ec159719b0b2112cd70f2e39df3533fb14d5516e26513270a2085f28adb2446d4ac72373a0c6223c5ad7c15ac32ec27db4636c6e9e29297df3378f339c43ebe35720b383e1d23c05ff2053b8966eb19680775935092860a92cc095cc97b9f0d91d31d653a7f0c99ab153affeb7ca055ca3a2a3290b707bb202425b0db04886a948e4ea0fde77edecd5f93275bdcbb49a7cf6f2e08d42f474113f4b0978baf3357b5f81e40424e3e16015ffabf003c63351160d3e74f6257b71032fc6b778d237a3a184575d39d3fb126c85133c9c2f0ba983f976d3b8cfc7c10d09f69110c74d1ee8193f737642c2fe77397a0fb67d4d55238a52321ac97bbfce81bc63a6eb3f65d5d8cf294fa0e7dc53b4fe4d0dc48d123c98e5f7da50678191d5308d7c91066fa141e7dd168fbcb2cf21ceef5e0bd44ca3ad299a37a4be132670bbfd68e9b6b822f07c1eedcc021d82e079c5027b8397ffad7da9cf11afa21031f1b95e81a7e36ea009eb584b799dcecec72ffacf296af6f50c72cc3bbc3e435d1946e446a9aa7d2859f76743b7d81af65d8e39573f593bf29f24ca7bfbcf782588214e78638e3f32f5376fcfa4d8e59750fce1fc47bf8da83e6c67770b3d06645958694958326fc92337a088db0ebbbcd34b7cb4822eec0cf850e7bd88c4e69b5195b4e867898028b7b1d82cb348db3a541fa74e83cfb224ae7a0ebb14e995d0441bd99432a891f2ce21892ac71fc9a80c3d685228a18686601d219cfa2325db0"}, {0xc, 0x7, {0x0, 0x1}}, {0xc, 0x8, {0x2, 0x3}}}}]}, {0x4}, {0x1928, 0x1, [@m_tunnel_key={0xb4, 0x9, 0x0, 0x0, {{0xf}, {0x3c, 0x2, 0x0, 0x1, [@TCA_TUNNEL_KEY_ENC_DST_PORT={0x6, 0x9, 0x4e24}, @TCA_TUNNEL_KEY_ENC_IPV6_SRC={0x14, 0x5, @rand_addr=' \x01\x00'}, @TCA_TUNNEL_KEY_ENC_IPV6_DST={0x14, 0x6, @mcast1}, @TCA_TUNNEL_KEY_NO_CSUM={0x5, 0xa, 0x1}]}, {0x49, 0x6, "809f09e7aa5c2a4f88693ffae10381961fb1d16ebb68320e536ef575e92917a2cbe501ccfe46160c06b2c2330cbe6d2e4f8dad7743c2da818ec756eb23f0afdcb197985e15"}, {0xc, 0x7, {0x0, 0x1}}, {0xc, 0x8, {0x2}}}}, @m_simple={0xd4, 0x9, 0x0, 0x0, {{0xb}, {0x40, 0x2, 0x0, 0x1, [@TCA_DEF_PARMS={0x18, 0x2, {0x6, 0x588, 0x1, 0x0, 0x4}}, @TCA_DEF_DATA={0xc, 0x3, '\'*/^]\\]\x00'}, @TCA_DEF_PARMS={0x18, 0x2, {0x4, 0xe5d3, 0x1, 0x5, 0x2}}]}, {0x69, 0x6, "70d44c04a4b4b9fded90a2090a07446647db331958f89dd8fc6898152566c265005932d943215013880227d8a8400c78dc7f3b947cf2ca9fcdada9bbca3bbc6ca7548a4bdadc804dd77631bad04ab9b7921c01a1e0d830aa44f22f2c9dfd113950d0dc4ae0"}, {0xc, 0x7, {0x1}}, {0xc, 0x8, {0x3}}}}, @m_sample={0x188, 0x18, 0x0, 0x0, {{0xb}, {0x5c, 0x2, 0x0, 0x1, [@TCA_SAMPLE_TRUNC_SIZE={0x8, 0x4, 0x8}, @TCA_SAMPLE_PARMS={0x18, 0x2, {0x7, 0x3, 0x8, 0x1f, 0x7fff}}, @TCA_SAMPLE_TRUNC_SIZE={0x8, 0x4, 0x1f}, @TCA_SAMPLE_PARMS={0x18, 0x2, {0xa5, 0x0, 0x1, 0xd3, 0xffffffff}}, @TCA_SAMPLE_RATE={0x8, 0x3, 0x7ff}, @TCA_SAMPLE_TRUNC_SIZE={0x8, 0x4, 0xfffffffa}, @TCA_SAMPLE_PSAMPLE_GROUP={0x8, 0x5, 0x7}]}, {0x102, 0x6, "b4ffd523d196d18f99019fa38bca1aec6a654b81befa6ff1cb0bca72d6172b440a7763bd9f10e03f52bdba4bd4bf33e87a302342b4a92c8de405caba3444bde359c099e1b51d7933e4991d38fc24e1f44d176d4c23da8ac594dc460a3121239b054a6f3c77bba349f7bbf42b57f5bc7540ef80d24513c6d6d56939382a0af4691d417c48b14550914f1862029cc4e756265248755d9b2b1012e5aa1f4a658c1d321618bf94e0971765756c74c7366607c9e98a508bd9f401eb6b083e5c8dfcf6024c41228dbcf38a262c81a26bb46080f9e2524dde68cbb8ad83f6d00e43ebde23708d7edc2ae7d9feafbe8f23b2728fbf4eb8f9af49edd3dd25e874da62"}, {0xc}, {0xc, 0x8, {0x3, 0x1}}}}, @m_mirred={0x1b8, 0xa, 0x0, 0x0, {{0xb}, {0x124, 0x2, 0x0, 0x1, [@TCA_MIRRED_PARMS={0x20, 0x2, {{0x401, 0x9, 0x2, 0x1f, 0x8001}, 0x4}}, @TCA_MIRRED_PARMS={0x20, 0x2, {{0x20, 0xffffffff, 0x2, 0x5, 0x6}, 0x1}}, @TCA_MIRRED_PARMS={0x20, 0x2, {{0x81, 0x3ff, 0x1, 0x9fa, 0x4e}, 0x3, r7}}, @TCA_MIRRED_PARMS={0x20, 0x2, {{0x4, 0x2, 0x6, 0x401, 0xffff}, 0x2}}, @TCA_MIRRED_PARMS={0x20, 0x2, {{0x6, 0x1f, 0x1, 0x9, 0xffff8e06}, 0x2, r8}}, @TCA_MIRRED_PARMS={0x20, 0x2, {{0x87, 0x5, 
0x0, 0x1, 0xa2}, 0x3, r11}}, @TCA_MIRRED_PARMS={0x20, 0x2, {{0x9, 0x3f, 0x7, 0x80, 0x1}}}, @TCA_MIRRED_PARMS={0x20, 0x2, {{0xffffffc0, 0xffffffff, 0x10000000, 0x400, 0x1000}, 0x1}}, @TCA_MIRRED_PARMS={0x20, 0x2, {{0x2, 0xffffffff, 0x7, 0x7f, 0x80000000}, 0x3, r12}}]}, {0x6c, 0x6, "ab5da86fb2ad38ac3afa3ca2d0cd2122ee2cd97d06a53895d3c0b5b34115ddcfde0ecf58b865716c834153c8bc3c50b7543b6c0cdf8c7962c3b03b6f2240b13fabb7fb29c2076b65f86abfbea4a1559990231f5d8aa2515ffaf553794a3307e98635070792d8cb64"}, {0xc, 0x7, {0x0, 0x1}}, {0xc, 0x8, {0x3, 0x2}}}}, @m_ife={0x154, 0xa, 0x0, 0x0, {{0x8}, {0x30, 0x2, 0x0, 0x1, [@TCA_IFE_TYPE={0x6}, @TCA_IFE_TYPE={0x6}, @TCA_IFE_PARMS={0x1c, 0x1, {{0x9, 0x9, 0x20000000, 0x0, 0x400}}}]}, {0xfd, 0x6, "3b320608c3d17fbe4bb044d1d06c3fca18ad65be8a88364bbbac4ecb8418e972d8e09b9b7618d44221c4281710ae381efc8aa7d2b05e352f7d649ec32537db7b79d844225424ff3f610c493ce02e237be4335e34172da888d369f69c2fa93de8d1dd66665448475855cb69c14fd0bb565926a820d5fac76b706daff25e74e0b39b5e153783a160809ad5c42edcd88c8deb091b78e529320ed13f80fc1835d9bc0215c07394b4566382e1ce35382619011e02195e7c720286716226b955b39e61a5190cfe5a1aef22b0f2eabfdd6feaf832e716541aabffa63f97515888c96e94809565450759a673dc2e9be5142bd746d23acd37d4b11c1c09"}, {0xc, 0x7, {0x1, 0x1}}, {0xc, 0x8, {0x1, 0x2}}}}, @m_ife={0x9c, 0x1e, 0x0, 0x0, {{0x8}, {0x48, 0x2, 0x0, 0x1, [@TCA_IFE_METALST={0x34, 0x6, [@IFE_META_TCINDEX={0x4, 0x5, @void}, @IFE_META_TCINDEX={0x6, 0x5, @val=0x3}, @IFE_META_SKBMARK={0x8, 0x1, @val=0xa7d}, @IFE_META_PRIO={0x4, 0x3, @void}, @IFE_META_TCINDEX={0x4, 0x5, @void}, @IFE_META_SKBMARK={0x4, 0x1, @void}, @IFE_META_SKBMARK={0x4, 0x1, @void}, @IFE_META_TCINDEX={0x4, 0x5, @void}, @IFE_META_SKBMARK={0x8, 0x1, @val=0xffff7ff7}]}, @TCA_IFE_METALST={0x10, 0x6, [@IFE_META_SKBMARK={0x4, 0x1, @void}, @IFE_META_PRIO={0x8, 0x3, @val=0x8}]}]}, {0x2f, 0x6, "e8f0c370616d4897fcd654704142cb9e2829feb03369921f4d1e5d204aba6297eb3e0e7f3b943c1ed7653d"}, {0xc, 0x7, {0x0, 0x1}}, {0xc, 0x8, {0x0, 0x3}}}}, @m_ctinfo={0x114, 0x1, 0x0, 0x0, {{0xb}, {0x14, 0x2, 0x0, 0x1, [@TCA_CTINFO_PARMS_CPMARK_MASK={0x8, 0x7, 0xca92}, @TCA_CTINFO_PARMS_CPMARK_MASK={0x8, 0x7, 0x7dcad435}]}, {0xd7, 0x6, "890cb057c5ed5f061f6b62a0b6322ba15166e9d57d8cd2724ed9c98ec7e8eb9762068cc536dd8afef0859d5bc54fdce694a8c0bd50dc05ed803fedc3a94973e1e52b4477940480f624ac9d05387a1e4ef68e704519dcc3ab670eeb9f25a525b64a8f81ca33a460181b8ad1445e4ebeb0d5d479e63f207a14e8f59a8e805b59b12f4747dd5b2be72b7d759e613626f04d0b6c5d26d49ba7844809eb24a6597241468fd923bc185352307b186217c66594996b5f26c7060001b9bc36813e03f86bd8e344697114ab6550478df4014771afb1462e"}, {0xc, 0x7, {0x0, 0x1}}, {0xc, 0x8, {0x2, 0x1}}}}, @m_ct={0x64, 0x1f, 0x0, 0x0, {{0x7}, {0x20, 0x2, 0x0, 0x1, [@TCA_CT_NAT_PORT_MIN={0x6, 0xd, 0x4e22}, @TCA_CT_NAT_IPV6_MAX={0x14, 0xc, @remote}]}, {0x1d, 0x6, "4489a70062790508a1d241d3a84e5025263f4dcac759d844a1"}, {0xc}, {0xc, 0x8, {0x1, 0x1}}}}, @m_connmark={0x1050, 0x14, 0x0, 0x0, {{0xd}, {0x20, 0x2, 0x0, 0x1, [@TCA_CONNMARK_PARMS={0x1c, 0x1, {{0x253, 0x0, 0x8, 0x9}, 0x9}}]}, {0x1004, 0x6, 
"6ebb8648c560751d5724e86edbb0d1336a795e412c21adeda67d66863cc67f73163bec203823c9c257bac4f5882e6cfd2d288901552f4b393cf8927a43317385297629ae4c314f255c1cc57e23b89b4eb612fcbaf4ece5afcdeb3e76d233804a3ee6907ac7f689b7187066988a0054148905795099b441b0af8a157f84554a62fc1561befb837b4233b4b2901adc804dd8b77b92d9f8673c92846f33a86b96dc6f45217bf0aa9b3d38f44f351fcc281ffb13d7e272e528e8c267400efd851ba5427cf2bd9b8bfbdd25c7297b814b7089313329d1054f22e6c7e865845cf484dcfff64ca0e39f7d5b9301c95b0a67875860e0cb1afec2b891e3bfecde1d9ad1cccd1cc4e673abcf53dd4ee68756a949f5aae8a2c56840745d12f2b269ca3561007ab156b96c144d2a6345de9cad2d138adf7461c0bbdfa812cc42df6a4efb70b28c074c461aa8827bb4555aaa41a8c50df0899cf1b44c3209e1929e03811c237331fa7c6ecf9f5135f65ae03fbe44e332916db518cc5dc88629fccdac14c4bd1b3042c3adc57e7ff7005e2f1ec90bce8c22184344f0c2bf4e4c4f3f7fed5a1358b5568d90124b54d65fea1902d3ebfca9fccc4f020a4c9d22f20a9cf745f86ba59efde1c28bd6fe6c726a67a387028ebc78834f9b0cecb0221d7292f1d45a9011ceb485befd06a067dc054ce748af5719a6b5f99735026c0f0a12ee6be3d9fcb15ac5c5ded098ab56432ad4866260d6c1f955dac4e2614af921938055114c32bc602b11ecfacec87000ee05330dc00913d586917540a3fbb1ef87d527d50eff8310e09561cee5b47c5e85bb5485d0390aab3fd697fd51fbff1edd5d4543607e34c976d162522a6a8a2dca0179b3524f8eed903ca98f7c8df7b25ce11d063db7f96e7e8ded3c96201f40edf156db80c1ea43ba4418dd10b7bbf092d973503d90f8024bc68f7640e81c6a11cd633788a8ca6bb110827cf7fb21c19e60ec3654363b8f8d4616f89404b59258a226f89a0df4ab184d7b76cc87adf788e9ecf9d2658ad2d9424b1e872ee4d6cf03acce55a768c94a465eb7654232eb4b39f453de8bc65fc731ea41d140a40359480d370bf8f7c432a838c3f9806dbd8439d20c7d06183dec12754fde3f6fc14b0e02a182d564f089fc163c1cb4905866726128929d080a21277514bdff8eb2b9e3e08f5c666063dbfb94248677983c4b64d8ef579d062770264a42cca5d84d0464b3ee8c55281c6b3c3aa3c7877d7de034cbae1553ca3f4449a8bddca7f13c90f98e4a19c7976dd03f48cde4a9ed6220583f514b324ef190015ec1608c6b104a3d1a1b154a81819e3d7bdca949c9ae09144f7f5bfcdac5e71fe022cd2df17ca1de1e301986c8a845a61374c11a2db26005b6ebb6b53d376fc613f9f6c4880c27a7b3f8c85e87a3151e320eddd67a33154ff107e2ad5dbf57da59aa0b3ba21d054d0ce04f888892afc93c37dff332ae9f518e06a3b197f5a6f4330ec266f3d82428c1d59a3a7f9c8d871f6100005ce2533de35c3294a3187548fa1ddc839196a54dba117fac43be04b95d7781cd09a0a024c6b98c0ec02529b9cc2ccfddbac211d1c639c518975b013408ab8cc6c66bcd5a76ff2148b5532e5736086c37876b8214428bb8292851d1b9894cb88d967812b8ce9c499cd87803310d2e816007721d75dd108a56de8ec827feb0bd5fab496e8f807b9a4a13f853018fa546337794af83c8a2cfe9e96a0190af5cafc5df4e5dbee45ce812f5463533e2aa95e15b1aa3b6e037ada0f9f87b88a324b1b2b41ae59af7f088c060d36aa59473f6f9408c01798679afad180fcfd95651a52dfc407ef18461178a1816d9c6c2d03b95868b51c6492814985db042216a0096705d7cc11121933a250c59ded8a5a78fe4d62ce5000b38049b5eee5fd4ff5f5f0f6fdccf99a0999c460c0af0eefa2eb725d063e9ae5cd2b284d3f2ce617dc5ac3d9a3efbe3dd4340de56836e7684e7eafc87127ad0fcf9d585393f4f459352bdfc90eedee43cd428c0334a7946ce474b4740505bf2482b02aa00b473975594a51a9d52d7f66434ae917d4fb41269cb5595adafad227e0cd0c2677e348ea4562dc3b157c97b32ddf59a292eb4aa4d8c04a0027047eb09fe247f7fc5857e00c75382447b72e3082b6afc928af9d94903b11dc3fe5800b60d3a0a42ff10e5823ebdc0a84aa050f8e2768972ff2fb0fb247e97aa9e1f9d229de7e0335dbcec3a84eb6947e79007b2ffde51cfcc76561b61c41e7c85237509e9f1c2c8227711e6827d0a18ec7cd607eee5615ab5c8b87f0b9a022c742adc7af02224d8bbd42c25da21ead470b577b5ff83497979e702703f2ba015bf66bcb9787dc07ae30a8ed785e747deda4f5d2c2f8d799daf884a6270d64e92353b99f5bc806b7f3703d6bd4f1455e8e07422abd439c3cf296e3fc325715fdcae249d302a4d45ed7e83bdbef6911c017b0a24229f55861595d56b97108eb42f336c0b06ea061decfa
e73ef606199b1914291588539bfcf783f693ac82285bf562f050f49381498a5bb1f88907635c66318ed87c50574e029601a08ac972345458cd69f2c8cb60f4748fde0dabea5ee5085d03f90aafdd2768919f96d0654684eb9d703f5fa0d84a4b54197acc137939621f1b6592aa529532c70c84dc7cbe9619650250b69bfd702a8e1f0a25399743aa66ae3d917b3a3bb28197fd616fa8752537ffa0c997e1fabc9b520206af891cd7d0884ff49d8a1959bbb0356769bd74c4db5b9530caeb67fc8487a9c27f452b62cfdc3292028d766f26fe057575e5b9d18fd0f45d580cb69e8aeb0470620b9ed89202e17108a4c85cf408a4a9f2192e7b4b061df7248fc665da69be540d1ea30d18a8df2fab3a676640b685414d5aeb750671418bfd9bb06b1a4c27529cc5aecc595485a225fe983f6e1ac4df9c110b13530de54d12becc3725a5f7b78d238c1ed3ff46e862f58ea81edd6a114beccf2df466021d8a2fa2ace79166ea52aac2f7cfa59ca95c336243249e1d2be9d6789163d629a09cec52a618494a419c240837695ee5c7400efda0da8a4488bce4356247884c2572ae98f79fbc1be8cab68f062deaff7f1b66ad7012b6b3c7252c0e404cda7141b666ac906aa233b27181aaf09be2497b9124a030b5039343c4abba563e4acc54b7db484b9c4bd8266049566e4844cf78a7d1859da0fb2b301178cb05954b7a5f318387d3eacc8d8cf0d1f47ed97735aa71c13e4eaf3cda94a39f54dea60d65609b2a716b3cfe059a97363b5f97b2467a4fcdc41eaade7198b17973d26a897fd3e7127c1bfd2c1155fb9e8c163875ac3ad32854a2a2bcb6856cbc57f829dae94d054fef75e615e484ac6500f8437cb99d1a81b2984093138e45480cb347c3391df2e78f143baea580874f07076adf2680ee9f835346a212bc75f17dd5a05c26b02734838d7ee7352ee6a2ec93664093fb0299172996358d812f4ea22c776c499f5765b202898c83c6415e8f2e5cb26d1c934c99749dee7eaf70089a0ff3ec99e578024f2f50bf28ccdb075520057bdc478ff28dcae29aad47cfd66f8e6de90e6659a18d21f0a301dbb92d4f9c8de54018ea4adc43832033f7496596cfd96ea70096d9bec2d658ff59dacf0124bad9a067ed129a986942747a805007ca308c109e86056b4f0aaf4ed4a4505206d54cfb8777af6f7124a2d6046f291ad71c556dd1ca9a8d2e0c6011c3a30fc96fa1090994876db5302c6c5732de98da066a8407b738caedcde567e5b054f3146508cd655e1c8a27fd2fff8d429193635a144f2c393fbbd6a8f83d5a9fe0020f50d5e74489a6b5dda26be9f9811bb5d0e0f9685e1d2ba446febbfd20e02bf24571d21316a6d3baa8d7a1a7b76d9cb9f0f20485530908e5f2b1bee7301bd4793f046ed8b3daf376a229ba9448e5b1106a9da951a04ef628e92c87fe8377fe5edc9d786c6829cd0712a0d7a36a2707c76e9c76ff26f77e96db47e5b6d2a579f45531b223ea3924370974c5699cc3059425406e872e5743365d37e34269d4c4d6be500fff04b7700ca30b4a35df5f4d7055e7524fac2c0bff835a3db22ec7d563a7f34f5200de9308e93d6be6f5b463f8ee6f7d066aa5bff9d9f22f3106ee6819f772b8b0ff5684fde997ab5700972ac7c67fef87a04d7199a6242a405588e11ea99cd0c18a0061945e7964afc2c65c37fa392abf344ad9952bc3b74f6ea5e71bae135db305789fde3d8692cf8c652838a58d340c31088d62cee48546273ebca3c05bd12a0642a98c383953d1a4a60d514ee0609589db0f76032111f0c32e889da4d0dd3c4da3e70d528062147fb3c5a08b530ac20cc08e7f0a9b2c5b69ab8696f5ca4fe31bba43170b7835646543ae1304491e4b08633c73675a33ee3677cb5bb42bbb4acfbd49d0003fd1908938aa34652b391d665e59fe2d93af4c10998d9d0ce3cc92679677e842067a71aa782c6b338162f75297b751cabf956a40cf83651ca9481d6e57b75a914234a3ac873e70e427fec94e865cf04cf347fa2ca5e87cce9bcbeb0f18fde00acac70044c5b2fbf6c5f23d92549beec26ada757266d625a69b423453bcb78ec6572f0e824d3ec1221898656b3e428cab22ed7880e0583d9ead5a9a6a4d92b460fee41389b9d220fed4aafd6346fd7f343176ae7084a81da224c7a41f2f91ec186e0b0a71dee4587e9e383b5a78472e0c0e7e82f4aba50b125e672068df7b3757ab43e4c805657689f94fa2ed24a036c8c95f66803abcee187222d126fbd9de9d21ee7bd4e78835f85e7c5c99c5e633fd883d5408889687d2dd98a24634656b9e05361a731860fc3983e3f389e52d522f28bc4d17236c829de5a44c3077cec1c69029322e4564a9a58a307674b18e149175849d78715efdc0f3270280bf9200fc9729275a79153e6420ea77181a17803b487df5c71a2959ef4518f21c255d842b2e50cf2ad55ec49523a2acda13b59181f58936fdea673633dbc897bcef3b2c245779e
e10aaa81ebc8d34fbe7dbe638d98a3e3a9c6ad87bbccf9535fec5da5bf899e03975cdbba357cde3fbeef8c5bcafd0be79b059fa936cdb513eb3fa5785b49edd0ae81ce44a99466a9829f542491cdcf26b2385a925d15a2455e597b0d96f1c9a040c8e8c0bce7808abac3572dd00ae11b2aa5c852c06d59bcaff80d239b4609cbd07ce2921dbdd9fa462d4b46f07cb60a0c5f9af56c68967feaecdfd38ca658d2290c53671ccbb31acaf6deb351bf97f60aaf9a019d461d5ff34be828f5462060ed523a6c329fa108d60dabdc093abe33ac0e816212130b3520e8f035a86b5b9276ef4865cdb513d6b8ad4772744c199919fae3b7b6ede401004916d47d3b59936be3c5b4b5e32a4171d18e088206c52a593f82676ba88784313e5bcf103a08b83004e66ce41ae0d288a99b49cc6d30f539f4bf2623e990237b8a6878c14dfdf40c7ee1a1cab0440bd7cf68703a7a2165dea415982f7f3e1d4e6bbaaa24800f07b24f5a5a8fa7c58889be316e50be6ca0b88b5ab2f2f951d27db3e218b4b0bb883918fb503ca3e11f54347aedf4732e29069acd93b8042fd0784dd0c0b1a4aaf28a62b703850162b55234f03c5022acb0e5d4cb34fe3ee8d57a8894b88570a755348332f997751bd6ec51bf27f1792b70e976caa78a0c19c56c2c6465bc5cac5575220e450a504243d383be232824302d14adb7623427f9e6ee258953db86ef4c7d60357e0059e55b5c293a6febc585212a71c391b3e853425d6b018dc20"}, {0xc, 0x7, {0x0, 0x1}}, {0xc, 0x8, {0x1, 0x3}}}}, @m_csum={0xa4, 0x7, 0x0, 0x0, {{0x9}, {0x20, 0x2, 0x0, 0x1, [@TCA_CSUM_PARMS={0x1c, 0x1, {{0x2, 0x1ff, 0x0, 0xffffffff, 0xfa}, 0x4a}}]}, {0x5c, 0x6, "02fc12910c1fa1fbd15b0a8568dc8b2221df1c7cc4f09acc637d253ccaad4df77282eadb26fdc72728e80db5d2ae50504293746c11ecd32873e29c2ed89ce95882c7eb9a3a8d1a664c3769bd1fb0ce5f356e121bbdef9855"}, {0xc}, {0xc, 0x8, {0x2, 0x3}}}}]}]}, 0x6dac}, 0x1, 0x0, 0x0, 0x40850}, 0x0) [ 2178.970125][ T7572] workqueue: Failed to create a rescuer kthread for wq "bond1403": -EINTR [ 2179.250313][ T7599] validate_nla: 4 callbacks suppressed [ 2179.250331][ T7599] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
10:41:55 executing program 2: openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) socket$qrtr(0x2a, 0x2, 0x0) (async) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r1 = openat$cgroup_ro(r0, &(0x7f0000000000)='blkio.throttle.io_service_bytes_recursive\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r1, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) (async) ioctl$FS_IOC_RESVSP(r1, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) (async) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e11cd1bf40efe7ac4d45bbf501328aa18d188ff2a76425b9fa853396b2316e9787d8c6247e9fe04ecae25ee1d48943d42784b8f3", @ANYRES32=r1, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) write$tun(r1, 0x0, 0x0) r2 = openat$cgroup_int(r1, &(0x7f0000000080)='blkio.throttle.write_iops_device\x00', 0x2, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r2, 0x40086607, &(0x7f00000000c0)=0x40) [ 2179.340655][ T7599] workqueue: Failed to create a rescuer kthread for wq "bond846": -EINTR [ 2179.407160][ T7632] EXT4-fs warning: 6 callbacks suppressed [ 2179.407176][ T7632] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further 10:41:55 executing program 5: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) writev(r0, &(0x7f0000000340)=[{&(0x7f0000000280)="043caa07f3a3f76d858ace87d769ded7550278376e8c4f6550d9108cbf5189deea33e10b8a7224c6107d53d01f904f58c8e83e60c4192c5a7fbd1a5f9c9fb63ca8dddee8d195355273926702d073752d80d8545e667b5c485a", 0x59}, {&(0x7f0000000100)="e48c29435b087869336f3e74363df6ea882508c8aa487ff564e800ccd724f50e3896c5d551b1a2ec6bb29187299fe2e06c11dfcb2075f9a839b34a4a6b4c2bec", 0x40}, {&(0x7f0000000400)="a318ece45becd6e47ff8375b816640bce6a9ff4d8e83ab791404d351c3ba957f2f3a0220176c17e899248cf03c8a908c4ade8de15d81e79b803df97f6fe5e817c2efde3befe034a7bc6d52a0c1caf88dc7d2077e40879876ce4aa8d6273615939a97526b8cb409d0c58f8075db16b3aed80bd44efb660be17d9db07a801a43b59a034e9998a5d60b072ab0fe5dd97a86e25568ab6d9d74c7af31171f38eaff0cb0fc24300b48212ec6608ef820fc964a28db4cd8d0526d550efac04bc60f4e4cd3c224cbe0ba45f28e0566930afbca59424e39854793c6e301fdc9cedeb00500db4af5f6c2004568aa4682fc656b1943bb63", 0xf2}, {&(0x7f0000000500)="a1a8c7128cf53b860b5aaa1ccee4ffe846420e0192cfed6fed4eeddf537179b0a83627c3868f3b78b60a61263a90957f69337b2ab0a123218ed0318185135c095b602df299adfc458eef4947ab34782ecdbae8436486efca9d9076f2316af18200a3014efba92706cc1748f7c5925bdaa3aa4516b6f1a6f22f4f1c23d2554620b90ad856298d3087e4519d78d14dd2d1dbb2401cfd330b19ac2fc26b2b4837023a585b846cdc0e007773719a6ad0cf3acb3cde81220e1909741522d75d5c49222f1d0ef6d8bd1b58070df04e658553cb734adbdb98c8482330ed25a1d53b5b3a6c09aa436715f3535c84a943", 0xec}], 0x4) accept(0xffffffffffffffff, 0x0, &(0x7f0000000080)) socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x0, 0x803, 0x0) syz_genetlink_get_family_id$mptcp(&(0x7f0000000000), r1) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) r2 = 
socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r3, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r5, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0xca030000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x3c}}, 0x0) 10:41:55 executing program 2: openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) r0 = syz_init_net_socket$nfc_llcp(0x27, 0x2, 0x1) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000000000)=0xfffffffffffffbff) r1 = socket$can_bcm(0x1d, 0x2, 0x2) ioctl$EXT4_IOC_PRECACHE_EXTENTS(r1, 0x6612) [ 2179.678907][ T7617] netlink: 'syz-executor.3': attribute type 1 has an invalid length. 10:41:55 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) r1 = accept(0xffffffffffffffff, 0x0, 0x0) (async) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) (async) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r2 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) (async, rerun: 64) sendmsg$nl_route(r2, 0x0, 0x0) (async, rerun: 64) r3 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r4 = openat$cgroup_ro(r3, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r4, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) ioctl$FS_IOC_RESVSP(r4, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) (async) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r4, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) (async) write$tun(r4, 0x0, 0x0) (async) r5 = socket$nl_route(0x10, 0x3, 0x0) (async, rerun: 32) r6 = socket(0x1, 0x803, 0x0) (rerun: 32) getsockname$packet(r6, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r5, &(0x7f0000000280)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000180)=@newlink={0x3c, 0x10, 0x403, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @vlan={{0x9}, {0x4}}}, @IFLA_MASTER={0x8, 0x4, r7}]}, 0x3c}}, 0x0) (async, rerun: 32) ioctl$ifreq_SIOCGIFINDEX_batadv_hard(r0, 0x8933, &(0x7f0000000040)={'batadv_slave_0\x00', 0x0}) (rerun: 32) r9 = socket$nl_route(0x10, 0x3, 0x0) (async) r10 = socket(0x1, 0x803, 0x0) getsockname$packet(r10, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, 
&(0x7f0000000080)=0x14) sendmsg$nl_route(r9, &(0x7f0000000280)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000180)=@newlink={0x3c, 0x10, 0x403, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @vlan={{0x9}, {0x4}}}, @IFLA_MASTER={0x8, 0x4, r11}]}, 0x3c}}, 0x0) (async) ioctl$sock_ipv4_tunnel_SIOCADDTUNNEL(r1, 0x89f1, &(0x7f0000000080)={'syztnl0\x00', &(0x7f0000000180)={'erspan0\x00', 0x0, 0x8000, 0xc0, 0x4bffa6b6, 0xc33, {{0x30, 0x4, 0x2, 0x10, 0xc0, 0x64, 0x0, 0x0, 0x4, 0x0, @local, @multicast1, {[@lsrr={0x83, 0x27, 0x88, [@loopback, @remote, @dev={0xac, 0x14, 0x14, 0x1f}, @dev={0xac, 0x14, 0x14, 0x39}, @loopback, @multicast2, @remote, @private=0xa010100, @broadcast]}, @cipso={0x86, 0x31, 0xa68f1957ca170c49, [{0x7, 0x5, "2b68b5"}, {0x0, 0xc, "bd6a909a9899389fcb78"}, {0x1, 0x2}, {0x7, 0xc, "317b8617345c148f4008"}, {0x6, 0xc, "e48931abd84a84bf40fc"}]}, @ra={0x94, 0x4, 0x1}, @rr={0x7, 0xb, 0xf9, [@private=0xa010100, @multicast1]}, @timestamp_addr={0x44, 0x44, 0xc1, 0x1, 0xa, [{@rand_addr=0x64010100, 0x9}, {@initdev={0xac, 0x1e, 0x1, 0x0}, 0x1000}, {@multicast1, 0x8001}, {@rand_addr=0x64010101, 0x40}, {@local, 0x2000}, {@initdev={0xac, 0x1e, 0x0, 0x0}, 0x1}, {@initdev={0xac, 0x1e, 0x0, 0x0}, 0xfffffff8}, {@private=0xa010100, 0x5}]}]}}}}}) sendmsg$nl_route_sched(r4, &(0x7f0000000100)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f00000000c0)={&(0x7f0000000400)=@newtaction={0x6dac, 0x30, 0x200, 0x70bd2c, 0x25dfdbfb, {}, [{0x4d4, 0x1, [@m_mpls={0x14c, 0x17, 0x0, 0x0, {{0x9}, {0x60, 0x2, 0x0, 0x1, [@TCA_MPLS_TC={0x5, 0x6, 0x7}, @TCA_MPLS_LABEL={0x8, 0x5, 0xe1cc8}, @TCA_MPLS_LABEL={0x8, 0x5, 0xbb4b3}, @TCA_MPLS_PARMS={0x1c, 0x2, {{0x401, 0x7fffffff, 0x7, 0x3, 0x6}, 0x2}}, @TCA_MPLS_LABEL={0x8, 0x5, 0x8ff28}, @TCA_MPLS_PROTO={0x6, 0x4, 0x88f5}, @TCA_MPLS_LABEL={0x8, 0x5, 0xa9f14}, @TCA_MPLS_PROTO={0x6, 0x4, 0x9000}, @TCA_MPLS_TC={0x5, 0x6, 0x6}]}, {0xc1, 0x6, "8ba73108021a0532b17f53d4d7d5a4a5b7f12647d76baa978c4eb1b9ddc7f67e6bf7989c39079c02983698608144fafb09425343ca58f160fd1a06456cfbe30512db8afb3804f0194840271a965ce77c7dbd9590b46202e83d6c119bd091833989e4c04635f80869f68544fd33310f22542e6b10a391f443b32037d18aad683c90d84cc1e300f778c18e202534caa59ba39b30b514d1e5d9bc356c1c3f42acf56c09986096b019504fefb866326a18ca4020383a28736f7f7eb7441c21"}, {0xc, 0x7, {0x1, 0x1}}, {0xc, 0x8, {0x0, 0x1}}}}, @m_xt={0xc0, 0x15, 0x0, 0x0, {{0x7}, {0x14, 0x2, 0x0, 0x1, [@TCA_IPT_INDEX={0x8, 0x3, 0x5}, @TCA_IPT_INDEX={0x8, 0x3, 0x3}]}, {0x86, 0x6, "6fff40c44e65569c9d45c0be51936f5ccfe37efcd474a9d04aa434d88165ff17625bb184058fc9d66ae35373254d6a83cb8d18244c613b9dea81bda7d79abc2fe88eb0a61bb9ab1c62637bc7ffc5b37b9e0d89aa0f920c1943e94594aee3f5641245060900abb6f384cc585afdb7f02557b4bf010b22cc80bd82f065df522e44075e"}, {0xc, 0x7, {0x0, 0x1}}, {0xc, 0x8, {0x1, 0x3}}}}, @m_connmark={0xfc, 0x9, 0x0, 0x0, {{0xd}, {0xac, 0x2, 0x0, 0x1, [@TCA_CONNMARK_PARMS={0x1c, 0x1, {{0x4, 0x80000001, 0x5, 0x8, 0x6}, 0xa89}}, @TCA_CONNMARK_PARMS={0x1c, 0x1, {{0x8, 0x963e, 0x5, 0x703f, 0x9}, 0x8}}, @TCA_CONNMARK_PARMS={0x1c, 0x1, {{0x25f0, 0x400, 0x2, 0xfffffffc, 0x8}, 0x9}}, @TCA_CONNMARK_PARMS={0x1c, 0x1, {{0x3ff, 0x9, 0x8, 0x7, 0x6}, 0x4}}, @TCA_CONNMARK_PARMS={0x1c, 0x1, {{0x6, 0x29, 0x6, 0x6, 0x2}, 0xca5e}}, @TCA_CONNMARK_PARMS={0x1c, 0x1, {{0x5, 0x7f, 0xffffffffffffffff, 0x1, 0x81}, 0x2}}]}, {0x24, 0x6, "c387dbe2c6dd6d34897459526e144ec5ac3b8c9b8218f27fea4b843326c185f9"}, {0xc}, {0xc, 0x8, {0x1, 0x2}}}}, @m_mpls={0xb8, 0x1a, 0x0, 0x0, {{0x9}, {0x24, 0x2, 0x0, 0x1, [@TCA_MPLS_TTL={0x5, 0x7, 0x40}, @TCA_MPLS_PROTO={0x6, 0x4, 0x88f8}, 
@TCA_MPLS_TTL={0x5, 0x7, 0x46}, @TCA_MPLS_TTL={0x5, 0x7, 0x8e}]}, {0x6c, 0x6, "3bc9286a2fe4c2e2c932b2e656d8583eec41af4595023cc4d6b4de02be435be75708fab2f04cce87a1467245514e2ffb90316bb58d2657ebfb644e7706519a9f76832274acdd474324a355883defa9275b65692ea3683eba43fe21e523ce14690d14eace71b7a2fa"}, {0xc, 0x7, {0x1}}, {0xc, 0x8, {0x2, 0x2}}}}, @m_ctinfo={0x110, 0x8, 0x0, 0x0, {{0xb}, {0x4c, 0x2, 0x0, 0x1, [@TCA_CTINFO_PARMS_DSCP_MASK={0x8, 0x5, 0x86c}, @TCA_CTINFO_PARMS_DSCP_MASK={0x8, 0x5, 0xd09}, @TCA_CTINFO_ACT={0x18, 0x3, {0x3968, 0x3f, 0x0, 0xe1ae, 0x6}}, @TCA_CTINFO_PARMS_DSCP_MASK={0x8, 0x5, 0x9e}, @TCA_CTINFO_ACT={0x18, 0x3, {0x3, 0x3, 0x5, 0xfffffffe, 0x2408eaf6}}]}, {0x99, 0x6, "b5019a429f1cc5a17c713a62d95586ce546dabd8bc0cc16edfab0f71357cdb428381534a495848655ed999837fbe8d129f7f5e9e5014bf54d0136fcd5f3809260b465f1fb0b584c62289695fe7faf1d51fabbff0fccc785b7e49a04ea9d450fce2c64cf7175dfbb43de8da60f33255c9a950a36c7393df50a22f64da4072c38184630f75ab1f1a3ac4c866a96054f115cd5a04dfd1"}, {0xc}, {0xc, 0x8, {0x2, 0x3}}}}]}, {0x60c, 0x1, [@m_connmark={0x158, 0x18, 0x0, 0x0, {{0xd}, {0x3c, 0x2, 0x0, 0x1, [@TCA_CONNMARK_PARMS={0x1c, 0x1, {{0x3f, 0x1, 0x10000000, 0x20, 0xed3a}, 0x1}}, @TCA_CONNMARK_PARMS={0x1c, 0x1, {{0x7, 0x2, 0x20000000, 0x9, 0x4971}, 0x1}}]}, {0xed, 0x6, "ef1e8908a063b1a4702405531a175033a476526dfa656a00d505677b10ee9ff65f780e80bc862661ffceeffd5a10e8d32a1fe08e5b24b0fb87a4812d8ee82375928c1a9ac08af2c6dd1e378e3d21b24b555cc06166f2e8a1a92b8fa2be12b5cec402d27706362816ddafb2ee918c507c788baf60d8c3ce1480db393bd8e8d7562ad84551395f6a163b3a74be6648ba27b378274440f2173dd1d70ca8b7404ccc9dbe74ae4c115ac0996b02ca3cb472666b41a88d3bb5d4cd87e52e9accba0a28d37fb7a1ea027007a7a3083249bdaf7175ef8dac9fb255ee6341a89271a703fb537a45d658df889df9"}, {0xc, 0x7, {0x1}}, {0xc, 0x8, {0x1, 0x2}}}}, @m_vlan={0x60, 0x12, 0x0, 0x0, {{0x9}, {0x30, 0x2, 0x0, 0x1, [@TCA_VLAN_PUSH_VLAN_ID={0x6, 0x3, 0x88e}, @TCA_VLAN_PUSH_VLAN_PROTOCOL={0x6, 0x4, 0x88a8}, @TCA_VLAN_PARMS={0x1c, 0x2, {{0x0, 0x200, 0x0, 0x2, 0x4}, 0x3}}]}, {0x7, 0x6, "80c4b0"}, {0xc, 0x7, {0x1}}, {0xc, 0x8, {0x3, 0x3}}}}, @m_csum={0xe8, 0x1c, 0x0, 0x0, {{0x9}, {0x20, 0x2, 0x0, 0x1, [@TCA_CSUM_PARMS={0x1c, 0x1, {{0x43, 0x2, 0x3, 0x4, 0x4ed}, 0x7c}}]}, {0x9e, 0x6, "8d2d1a4fc178bdd9af7215770320ee5d189796b9523885d646b9faad72171995ba0cb9b53f04e5fed40ddf5887ad50a044d785a2b6e11736e8f506ee3925b6cd964a3c754d5e75751422e595c3a681e54e8dbd9c573a21a9124392cff8b41422d1cceefa07091cda24e9e334d5b83cdeecfbd53be23d85a20c1e27855b9b1bdccb118e59bdb68e52b174d58f58c1014ef0a59d0a280533d7c5c2"}, {0xc, 0x7, {0x1, 0x1}}, {0xc, 0x8, {0x1, 0x3}}}}, @m_ct={0x120, 0x13, 0x0, 0x0, {{0x7}, {0x64, 0x2, 0x0, 0x1, [@TCA_CT_PARMS={0x18, 0x1, {0x7, 0x0, 0x3, 0x9, 0x1}}, @TCA_CT_MARK_MASK={0x8, 0x6, 0x9}, @TCA_CT_NAT_IPV6_MIN={0x14, 0xb, @remote}, @TCA_CT_NAT_IPV4_MIN={0x8, 0x9, @multicast1}, @TCA_CT_LABELS={0x14, 0x7, "e817e941fccd601a0241835ba7bf6acf"}, @TCA_CT_ACTION={0x6, 0x3, 0x1a}, @TCA_CT_NAT_PORT_MIN={0x6, 0xd, 0x4e24}]}, {0x97, 0x6, "c1220e078d92ad552f095b77da0c43c010315f85d69609130bd223c1257ca62f2ea94b2203ff230898e2afe5c7175f57dde2aeee8bd716cd86c1bec5275aeadb78c16e27aaaa47b0d1ca3142be7316e89d44ae4d73148aad37fb64ab91fd7289c874d6bcbfb874bace16f13e8f04cedff00022370917f89069a874267853f3a83697bd7fdce8c59761c1d90ef95d4f4d246db3"}, {0xc, 0x7, {0x1}}, {0xc, 0x8, {0x1, 0x3}}}}, @m_bpf={0x11c, 0x11, 0x0, 0x0, {{0x8}, {0x10, 0x2, 0x0, 0x1, [@TCA_ACT_BPF_NAME={0xc, 0x6, './file0\x00'}]}, {0xe5, 0x6, 
"6b9dab9f6fd939cc8613e252c3955e11b7698e4be3ce6e84222a2d535aea2af1288c77e2f1c4d06fa5cc837a86730cd7b37f67ed44c6596d420101516176be79b9489b878142e831fdc105abbf8da0255781d0e58195d79d80c71d8bc1e3b8bc515b8d6e2968e075897670729dbba840171bb38595cca57fe1982b190646ad4c6129c808b32748adac3b9f6bdaf889e962e822ac10869126c0190aaec2637a5b0fafdc782835d05ffc83f312d2ef25031e02ad44cd5e09ad29ace41583b95517ca021d03cdf7367eb9eb680038c4664bd407dfaf73c919ced26f722c160b2c6254"}, {0xc, 0x7, {0x0, 0x1}}, {0xc, 0x8, {0x1, 0x2}}}}, @m_ct={0x12c, 0x1f, 0x0, 0x0, {{0x7}, {0x20, 0x2, 0x0, 0x1, [@TCA_CT_NAT_IPV6_MIN={0x14, 0xb, @private2={0xfc, 0x2, '\x00', 0x1}}, @TCA_CT_ZONE={0x6, 0x4, 0x15}]}, {0xe5, 0x6, "3a6787e64b422053ee5a680a0634a4b4209e0f088a82c84842852c43fd1fc291c1bb3da34346ec12fa321aceed055f6f89391bcc05c4663003f2339afa7966cbccef2f42731d34f0efa4d7c7c5e03fa0b9203065eabdc9449b13a2ba3a2194d85aa1903324808e338b6ebd6dfff0b08e93fc6ebb456e36033756efc255d0c6629d64a8918c5ef04c2fb6da65a2015dc73cd1a5c46b09f4b8f488a26d523d2d639cc89bcd4e458c6b4c67572f164eabd9f1f80b03435a04c78b579be348e9d5c3a11d234bb763e0065b39e72859d95e36515c1db55ef47c1bed4d051ceefdc2ea79"}, {0xc, 0x7, {0x1}}, {0xc, 0x8, {0x1, 0x3}}}}]}, {0x1b1c, 0x1, [@m_nat={0x15c, 0x20, 0x0, 0x0, {{0x8}, {0x54, 0x2, 0x0, 0x1, [@TCA_NAT_PARMS={0x28, 0x1, {{0x8, 0x6, 0x2, 0x100, 0x10001}, @broadcast, @multicast1, 0xff000000, 0x1}}, @TCA_NAT_PARMS={0x28, 0x1, {{0x1, 0x0, 0x7, 0x8, 0x233}, @broadcast, @remote, 0xffffff00}}]}, {0xe3, 0x6, "0e91a33d18b46768bf314c74682c8c90d6e614cd533cdb45d483fe747a83398983b4f9f116baf43e65d6a6274d035ea99ede57842a5937ae74998cb4bb7498eb79c5c691a480758d3a178707c20a0ed7943bd91ab813caeb2b87a46149a6ac6331d350f99c74ec334003de2dfafa49fe6254530a6282bcfd2a4499321b9fa752dab47b79eaccede9200302236238f2df99d801a91367aae0c23691970e7eeefa52e947b154df370c59d901c3ba7ce382ddfaa5c2e2cc56864f528a6dc79110edc484a210f4e8fd6c1fafd2ecd91fbcd022aa2eeae993fc9132bddb8b5405fa"}, {0xc, 0x7, {0x1}}, {0xc, 0x8, {0x2, 0x1}}}}, @m_police={0x1274, 0xc, 0x0, 0x0, {{0xb}, {0x117c, 0x2, 0x0, 0x1, [[@TCA_POLICE_RESULT={0x8, 0x5, 0x6}, @TCA_POLICE_PEAKRATE={0x404, 0x3, [0x2, 0x8, 0x2, 0x5, 0x20, 0x6, 0x2, 0x7da, 0x2, 0x3, 0x1ff, 0x800, 0x0, 0x800, 0x7fffffff, 0x4, 0x101, 0x40, 0xff, 0xe5, 0x0, 0xa2c0, 0x6, 0x100, 0x7f, 0xfa, 0x1, 0x5, 0x1, 0x5, 0xd0e, 0xcc, 0x80000000, 0xaf, 0x9, 0x6, 0xffffff7a, 0x0, 0x473d, 0xe79, 0xc2bf, 0x9, 0x4, 0x4, 0x6c03d9d6, 0x81, 0x6, 0x7fffffff, 0x6, 0x1, 0xa2d, 0x8625, 0x7fff, 0xffffff01, 0x80000000, 0x0, 0x3, 0x1, 0x0, 0x9, 0x0, 0x2, 0x1000, 0x2, 0x6204, 0x5, 0x8, 0x8, 0x0, 0x4, 0xfffffffc, 0x800, 0x502, 0x766, 0x1ff, 0x80000000, 0x7, 0x5, 0xff, 0x6, 0x7ff, 0x8000, 0x7, 0x7, 0x8, 0x20, 0x8001, 0x9, 0xfffd, 0xca5c, 0x6, 0x2, 0x3, 0x2, 0x3, 0x6, 0x9, 0xfffffff7, 0xffff, 0x7, 0x7ff, 0x80, 0x1ff, 0x7, 0xd62d, 0xf00, 0xffff, 0x0, 0x101, 0x0, 0x1, 0x400, 0x6, 0x7fff, 0x3c, 0xd0f, 0xfffffff9, 0xee, 0x2, 0x1, 0xffffffff, 0x6, 0x3, 0x2, 0xffffffff, 0x72f3, 0x236, 0x0, 0x9, 0x3, 0x7fffffff, 0x20, 0x4, 0x1, 0x9, 0x9, 0x7, 0x3f, 0x3, 0x9, 0x7b, 0x83c, 0x4, 0x1, 0x7f, 0x400, 0x0, 0x1, 0x1, 0x100, 0x8, 0x8001, 0x6c2a, 0x7fffffff, 0x10000, 0x401, 0x0, 0x3, 0x1ff, 0x3, 0x4, 0x200, 0x5, 0x3, 0xefb, 0x1, 0x1, 0x7, 0xff, 0xe7b3, 0x148, 0x2, 0x401, 0x7, 0x7, 0x9, 0xfffffcaa, 0x3, 0x401, 0x6, 0x8001, 0x9, 0xd724, 0x5, 0x426, 0x3, 0x2, 0x8, 0x64, 0xfff, 0x3ffc, 0x0, 0x0, 0xe1, 0x9, 0x10000, 0x6, 0xfe000000, 0xae31, 0x41, 0xffff, 0x10000, 0x3f, 0x80000000, 0x200, 0x9, 0x0, 0x3, 0xffffffff, 0x101, 0x5, 0x8000, 0xff, 0x3f, 0x8, 0x7fff, 0x7fff, 0x8, 0x0, 0x2, 0x4, 0x4, 0x800, 0x169b, 
0x0, 0x3, 0x2, 0x1f, 0x9, 0x9, 0x6, 0x4, 0x3e000000, 0x3f, 0x6, 0x8, 0xf195, 0x9, 0x101, 0x8, 0x24000, 0x1, 0x1, 0x2, 0x8, 0x6, 0x2, 0xffffff7f, 0x5c34, 0xffffffff, 0x5e22339, 0x800, 0x4, 0x7]}, @TCA_POLICE_AVRATE={0x8, 0x4, 0x17}, @TCA_POLICE_RATE64={0xc, 0x8, 0x4}, @TCA_POLICE_RATE64={0xc, 0x8, 0x4}, @TCA_POLICE_RATE={0x404, 0x2, [0x2000000, 0x6, 0x3, 0x49c2, 0x2, 0x7, 0x8001, 0xfffffffb, 0x60f, 0x2ee, 0x7, 0x4, 0xffffffff, 0xc14, 0x0, 0x8, 0xff, 0x3, 0x73a4, 0x200040, 0x8000, 0x6, 0xcc, 0x87e, 0x6, 0x7, 0x2, 0x7f, 0xffff, 0x7, 0x7, 0x140000, 0x8001, 0x8001, 0x5c5, 0x3f, 0x9, 0xda37, 0x5, 0x101, 0x5, 0x9d, 0x4, 0x64, 0x9, 0xc4b, 0x1, 0x8000, 0x100, 0x9, 0x7fffffff, 0x5156, 0x5, 0x8001, 0x80000000, 0x1, 0x7, 0x7, 0x1ff, 0xffff, 0x3, 0x5, 0xdc16, 0xfffffff8, 0x51, 0x800, 0x83, 0xffff2fdc, 0x7fffffff, 0x10001, 0x7f, 0x100, 0x1, 0x8, 0x6, 0xc0, 0xff, 0x7fffffff, 0xffff0000, 0x0, 0x9, 0x0, 0x0, 0xffffffff, 0x1, 0xfe0, 0x71, 0x10001, 0x0, 0xf27, 0x8, 0x10001, 0x8000, 0x7, 0x80000000, 0xb5, 0x7fffffff, 0x913, 0x0, 0x0, 0x8f7, 0x7, 0x7, 0x88, 0x1, 0xffffffff, 0x0, 0xfffffffe, 0x3, 0x6, 0x7, 0x0, 0x3, 0x1, 0x2, 0x0, 0x4, 0xbd06, 0x0, 0x60, 0x2, 0x8aec, 0xffff8000, 0x1ff, 0x1000, 0x8, 0x6, 0x8, 0x20, 0x8, 0x56, 0xa2a, 0x2, 0xad1b, 0x82f4, 0x6, 0x24f, 0xffff, 0x0, 0x80, 0x4, 0x7ff, 0x5, 0xfffff001, 0x4, 0x5, 0x9, 0x5, 0x2, 0x400, 0x6, 0x4, 0x401, 0xfffffff9, 0x1, 0x3, 0x3f, 0x1f, 0x3, 0x2, 0x100, 0x8, 0x40, 0x0, 0x8, 0x3, 0x20, 0x897, 0x5ad, 0x5c2d, 0x6, 0x101, 0x4, 0x10001, 0x571, 0x400, 0x7, 0xfffffff8, 0x1, 0x5, 0x1, 0x95a, 0x10001, 0x8, 0x9, 0x1, 0x1a05, 0xbe, 0x10000, 0x6, 0x4f, 0x7, 0x3ff, 0x3f, 0x8, 0x2, 0x2, 0x389, 0x114c, 0x9, 0x8, 0x0, 0x20, 0x1, 0x4, 0x3, 0x0, 0x0, 0x0, 0x80, 0x2, 0x2, 0xfffffffe, 0x1f, 0x1, 0x1, 0x2, 0x6, 0x2e72, 0x5ca, 0x8001, 0x4, 0x9, 0x100, 0x80000001, 0x7f, 0x5, 0x7, 0x3, 0x6, 0x1, 0x6, 0x0, 0x9, 0x0, 0x8, 0x0, 0x0, 0x5, 0x6, 0x7fff, 0x1, 0x80000000, 0xb45, 0x7, 0xd2, 0xa2, 0x6, 0x0, 0xffff, 0x5, 0x46e, 0x7fffffff, 0x5, 0x4]}, @TCA_POLICE_TBF={0x3c, 0x1, {0x3, 0x20000000, 0x101, 0x800, 0x100, {0x0, 0x2, 0x9, 0xffff, 0x9, 0x9}, {0x0, 0x1, 0x2, 0x4, 0x5, 0x8001}, 0x1, 0xfffeffff, 0x4}}], [@TCA_POLICE_RESULT={0x8, 0x5, 0x6f}, @TCA_POLICE_TBF={0x3c, 0x1, {0x3, 0x0, 0x81, 0x4, 0x9, {0xff, 0x1, 0x1997, 0xffff, 0x2635, 0x40}, {0x3, 0x2, 0xfff, 0x3, 0x1, 0x7}, 0x5, 0x6, 0x436}}, @TCA_POLICE_RATE64={0xc}, @TCA_POLICE_RESULT={0x8, 0x5, 0x4}, @TCA_POLICE_TBF={0x3c, 0x1, {0xffffffff, 0x7, 0x3, 0x3ff, 0x64000000, {0x7f, 0x2, 0x8000, 0x8001, 0x2, 0x19}, {0x9, 0x3, 0x4, 0x85a, 0x0, 0x6656}, 0x5, 0x3ff, 0x81}}, @TCA_POLICE_AVRATE={0x8, 0x4, 0x7}, @TCA_POLICE_AVRATE={0x8, 0x4, 0x5}], [@TCA_POLICE_AVRATE={0x8, 0x4, 0x8}, @TCA_POLICE_RATE64={0xc, 0x8, 0x9}, @TCA_POLICE_AVRATE={0x8, 0x4, 0x6}, @TCA_POLICE_TBF={0x3c, 0x1, {0x4, 0x2, 0x80000001, 0x3, 0x6, {0x6, 0x2, 0x3ff, 0x0, 0x7fff, 0x5}, {0x4, 0x1, 0xf456, 0xffff, 0x35, 0x40}, 0xff, 0x8, 0x8}}], [@TCA_POLICE_RATE={0x404, 0x2, [0x2, 0x7fff, 0x5, 0x3, 0x4fb, 0x7ff, 0x0, 0x3ff, 0x7, 0xffff, 0x3, 0x1ff, 0xbf, 0x7fffffff, 0x8, 0xff, 0x8000, 0x80000001, 0x0, 0x2, 0x80, 0xfffffff9, 0x7, 0x8, 0x1000, 0x7, 0xa8a6, 0xfffffffb, 0x0, 0x0, 0x1c, 0xf11c, 0x101, 0x800, 0x1, 0x1, 0x7ff, 0x0, 0x7, 0x8, 0x0, 0x200, 0x6, 0x9b0c, 0x3, 0x2, 0x9, 0x9, 0x2, 0xafd, 0xffffffff, 0x3, 0x0, 0xf10, 0x2, 0x80000001, 0xa3bc, 0xfff, 0x2, 0x100, 0x3, 0x8, 0x1, 0xffff8c67, 0xbb6b, 0x3, 0x800, 0xfffffff9, 0x8, 0x34, 0xffffffff, 0x4, 0x40, 0x7, 0x0, 0x2, 0x2, 0x2, 0xfffffffe, 0x9, 0x5, 0x3f, 0x2, 0x6, 0x401, 0x8, 0x72, 0x5, 0x529, 0x4, 0x76d, 0xd47, 0x7, 0x8, 0x8001, 
0x7, 0x5, 0x80000000, 0x81, 0xffff, 0x16b7, 0xd0, 0x8001, 0x5, 0xb5, 0x8001, 0x7, 0x5a, 0x10001, 0x3, 0x100, 0x0, 0x8000, 0x8, 0x8, 0x2, 0x1, 0x6, 0xffff, 0x1, 0xb8, 0x3f, 0x1, 0x3, 0x6, 0xa1a, 0x7, 0x6, 0x120e, 0xfffffff8, 0x2, 0x1, 0xf7, 0x2, 0x6, 0xfffffff8, 0x0, 0x7, 0x5, 0xfffffffc, 0x7ff, 0x9c, 0x110, 0x9, 0x562, 0x4e80, 0x0, 0x0, 0x9, 0x6, 0x5a0, 0x400, 0x30, 0xffff8fef, 0x8000, 0x800, 0x7ff, 0xffffffff, 0x3, 0x8, 0x2, 0x20, 0x63, 0x9, 0x8001, 0x4, 0x6, 0x4, 0x3e0000, 0x8001, 0xfffffffb, 0x0, 0x3f, 0x800, 0x1, 0x1487, 0x1ff, 0x5, 0xac80, 0x1, 0x74b7, 0x800000, 0x80000001, 0x80, 0x8, 0x8, 0xfffffffd, 0x400, 0x54, 0x0, 0x4, 0x200, 0x8, 0x5, 0x2, 0x5, 0x6, 0x7f, 0x3, 0x0, 0x1000, 0x4, 0x33, 0x7f, 0x8, 0x974, 0x3, 0x1, 0x3, 0x6, 0x9, 0xab, 0x800, 0x100, 0x1, 0xf30, 0x10001, 0x2, 0x9, 0x0, 0x2, 0x5, 0x9, 0x401, 0x4, 0x8, 0x0, 0x180000, 0xf8, 0x10001, 0x1000, 0x7ff, 0x159, 0x3, 0x540fe27a, 0x8, 0x9, 0x8001, 0x4, 0x7, 0x4, 0x6, 0x4, 0x4, 0x3c, 0x7, 0x1000, 0x97000000, 0x101, 0x0, 0xffff8001, 0x2, 0x4, 0xffffffc1, 0x88ae, 0x4]}], [@TCA_POLICE_RATE={0x404, 0x2, [0x7, 0x831cd8b, 0x9, 0x2, 0x2, 0x52, 0x4, 0x3913, 0x1, 0x2, 0xffffff0a, 0x9, 0x1, 0x1, 0xffff0, 0x3a137682, 0xff, 0x9, 0x251fc07a, 0x4, 0x80, 0x605d, 0xff, 0x6, 0x0, 0xffff, 0xe1, 0x0, 0x0, 0x20, 0x0, 0xfffffffa, 0x5, 0xa7, 0x15, 0x101, 0x36d, 0x9, 0x1, 0xffffffff, 0x3, 0x5bb, 0x8000, 0x4, 0x400, 0x2d9, 0x6, 0xd0d, 0x2, 0x4, 0xfffffffc, 0x5, 0x5, 0x1, 0x63952b1b, 0xff, 0x3, 0x16a73061, 0x3, 0x2, 0x8, 0x9e, 0x0, 0xfff, 0x68, 0x7, 0x4, 0xe9b5, 0x80, 0x5, 0xffffff17, 0x1d0000, 0x1, 0x2, 0x2, 0x8, 0x7, 0x8000, 0xd00, 0x4b0f, 0xffff0001, 0x670, 0x8, 0x81, 0x4, 0x8, 0x7ff, 0x7ff, 0x6, 0x4, 0x20, 0x1, 0x1, 0x400, 0x0, 0xffff, 0x0, 0x7, 0x3, 0x9, 0x4, 0x8, 0x5, 0x9, 0x400, 0x0, 0x80000000, 0x9, 0x8, 0x8, 0x525f, 0x7fff, 0x7, 0x8, 0x6, 0x1, 0x6, 0xa0db, 0x3f, 0x4e07, 0x6, 0x100, 0x1, 0x6, 0xfff, 0xfffffffa, 0xf2, 0xf10, 0x7, 0x5, 0x7, 0x2, 0xc2, 0x3, 0xff, 0x0, 0xffffffff, 0x7, 0x800, 0x5, 0x3, 0xc7, 0x1000, 0x467, 0x2, 0x9, 0x4, 0x5463, 0x153, 0x3, 0x1000, 0x8, 0x9, 0xffffffff, 0x8, 0x3, 0x0, 0x8a0, 0x7fffffff, 0x8, 0x5, 0xe3, 0xdb, 0xf703, 0x86f, 0x3f, 0x1f, 0x7fffffff, 0x8, 0x1, 0x7, 0xffff0d5e, 0x5, 0x9, 0x81, 0x1, 0x22, 0x1, 0x5, 0xf, 0x10000, 0xfffffe01, 0x8000, 0x76, 0x6, 0x7, 0x5ec66bf2, 0x7fff, 0xfffffffc, 0x1f, 0x7, 0x97c, 0xa333, 0x0, 0x1ff, 0x101, 0xff6a, 0x0, 0x46, 0x9, 0x80, 0x10001, 0x3, 0x1, 0xf338, 0x4, 0x10001, 0x5, 0x9, 0x2, 0x7ff, 0x62b4, 0x9, 0x1, 0x7, 0x200, 0x3, 0x81, 0x1, 0x7f, 0xffffffff, 0x2, 0x3, 0x0, 0xffffff01, 0x8, 0x7, 0x9, 0x1, 0x20, 0x80000001, 0x6, 0x7, 0x4, 0xfffffffa, 0xb3, 0x1, 0x3, 0x1000, 0x9, 0xe17, 0x8, 0x5, 0x39, 0x6, 0x10001, 0x3e, 0x77f2, 0x9, 0xffffffff, 0x62, 0x7f, 0x8, 0x1ff, 0x6, 0xfffffff9]}, @TCA_POLICE_RESULT={0x8, 0x5, 0x80}]]}, {0xcd, 0x6, "71fdecd6a4f4fb667fc46b473ccf4401974ede14ce09a5bd8a789176a30fba411269897ba5a13f9152007d72838401e04488571a2fd25cb34693b76fc0d34200f3e3d0024c706661d4245eeecadcd51e3a3b5861a4ffbfb58f40134829ac521727d993cb95dc55a9fd053c1713597e41e5ae46a345990548d39c5b63ddd20f357ecc037dc6b6d09c94e3099066a7256df67b0a27383f86b39614d349a21a19bdcd577fa190b9fd9f4d941a0a276b236f22234fb5f67cc87f39a2652cc0b97152ba7ac752eeced6736c"}, {0xc, 0x7, {0x1}}, {0xc, 0x8, {0x1}}}}, @m_pedit={0xbc, 0x1a, 0x0, 0x0, {{0xa}, {0x3c, 0x2, 0x0, 0x1, [@TCA_PEDIT_KEYS_EX={0x18, 0x5, 0x0, 0x1, [{0x14, 0x6, 0x0, 0x1, [@TCA_PEDIT_KEY_EX_HTYPE={0x6, 0x1, 0x3}, @TCA_PEDIT_KEY_EX_HTYPE={0x6, 0x1, 0x3}]}]}, @TCA_PEDIT_KEYS_EX={0x20, 0x5, 0x0, 0x1, [{0x1c, 0x6, 0x0, 0x1, [@TCA_PEDIT_KEY_EX_HTYPE={0x6, 0x1, 
0x1}, @TCA_PEDIT_KEY_EX_CMD={0x6, 0x2, 0x1}, @TCA_PEDIT_KEY_EX_CMD={0x6, 0x2, 0x1}]}]}]}, {0x55, 0x6, "e8b714955aff02beba9f979ee73dad360e4744faed71507eeea46ae5cc0a7feb948665465597cfc2ac08a2b3d689480cb6b28b706d3dfd131a1d82f8b0ccd0ad944b95f9755b2d835893f837a1782c3ae9"}, {0xc, 0x7, {0x1, 0x1}}, {0xc, 0x8, {0x2, 0x3}}}}, @m_xt={0x4ac, 0x13, 0x0, 0x0, {{0x7}, {0x3ac, 0x2, 0x0, 0x1, [@TCA_IPT_INDEX={0x8, 0x3, 0x5}, @TCA_IPT_TARG={0xf7, 0x6, {0x400, 'security\x00', 0x3f, 0x401, "6e355aabb9a96176f9f1298b7364f73780a79ab038faa790a6c2e06293087fc73c1493b99b9bb9b1c3cbe0fa3ac232992561b88403893acd340f47b53da247ceaf0840a9b1fa846ff434e17b5bf74b89a15a857bba848203c72a0f3bd6b5cd42aef06569b882542732a14f4c9f08485d92723ceb4a9236427174e9f0a6cd49a30590a03e77e2ba9d93aef89bc08712c0fed4109e169bbfd559116abb25790c7f721024fc0501cadc59c72aef7ebe7a88082aea0c581bfb52a56af0157cda37e481086b3eee2dfbf628717ea302"}}, @TCA_IPT_INDEX={0x8, 0x3, 0x20}, @TCA_IPT_TARG={0x11a, 0x6, {0x1, 'nat\x00', 0x4, 0x100, "95b759431547ace99f61372e00e4a99ac7bd9849d3f71efcd7b2c2162ffabdfa9c94b20125edd7eb65b749af0a13fb50f796d31a5bd05c912d017ae0330befa01b045b394103668188720e2694925636d0ebd9592bf8131ee4e68163739ba55708bbcfda3dd35579ee5d6d2d8b879aa121d56333f901a5d108abbf7e5bcdd7cec2e14c2f98a790e60f9f67250096f352a9a97ed83d01f725a1d47a27482334701a0c0ae97a84f0fff574572bbba548600f116fc9d5932f10a1d5555f6cf7f9a8685c8753c46dfd964d84f06f473e93346f6a5300ed4e7f82dc1bc704f45840795e8991e970f276a93b0247187820224b"}}, @TCA_IPT_TARG={0x9c, 0x6, {0x5, 'nat\x00', 0x42, 0x3, "b5c258996c0dec54f66b2adbb42bd42f61831ce342304b84aeb3710348536ce78c413e9a204ca951e0badc055c59963dcad0a5ede5df17e54db96eb5cb87c1fa2ba6ad22c8f646cf96c5e04da06863edf81c228fea3fc7b2ed622344c750ef8f3e2d6401a8d7cb1e204b3c893d10987f0f23"}}, @TCA_IPT_INDEX={0x8, 0x3, 0x1b50}, @TCA_IPT_TABLE={0x24, 0x1, 'filter\x00'}, @TCA_IPT_TARG={0xbb, 0x6, {0x4, 'filter\x00', 0x0, 0x200, "2e14dc42a439f686faafdca2af378fd9721362dab9f2bb2da2fa7ae34b609d21a3575b9a10235a0ee99524d91d8a7c32cf5e369f364429d6b618a1ab2211968b323cb8c8e573293aca15125d3979de48e647a72c967a1030ed8cf6a0dbc770bce2d2867008f80de2ca1af384fb33a02f9f3959e42c758a9677b78d35f23438dcd3b5e5b3e3de15e1887c7f6f88ac206101"}}]}, {0xd9, 0x6, "b93d34c391cb4d535592434f9a7c9d68135fe32abd27380f1d64ca820a986cbdff0300df727037654e1f2362e8154d8d5a535190cfe9627aabfb6c17be6feb67f0ef7b31a295c6bbb4fd3eae2ae784e4c66cfedf7b1a050aa4d93368b43825dfdf46906f7130dc4b70337be98cabade89e90eee6be4717611aee366e9c4d156ae1303f164bdde6e22de23d188c48351b0068a76fd3fb3e4278ba8f9412c92c179554a234ea624bf4b0aa6637ddd410e55680c7acb904e934dcbb9005e66b4fdb9f8f3aa55dad4f63f25190a71a6a102661e3225b80"}, {0xc, 0x7, {0x0, 0x1}}, {0xc, 0x8, {0x3, 0x1}}}}, @m_nat={0x174, 0x1f, 0x0, 0x0, {{0x8}, {0x7c, 0x2, 0x0, 0x1, [@TCA_NAT_PARMS={0x28, 0x1, {{0x2, 0x5, 0x3, 0x0, 0x7e}, @loopback, @rand_addr=0x64010101, 0xffffff00}}, @TCA_NAT_PARMS={0x28, 0x1, {{0x1, 0x53e0b663, 0x3, 0x2, 0xa4}, @local, @dev={0xac, 0x14, 0x14, 0x2c}, 0xff000000}}, @TCA_NAT_PARMS={0x28, 0x1, {{0x200, 0x80, 0x3, 0x9, 0x7}, @empty, @rand_addr=0x64010100, 0xff, 0x1}}]}, {0xd3, 0x6, "c524e609445b859e6a27c4a7f8c7d1bae3701acd22365933aa623d0092aea7abb88d4a33fd111e60cafc6d66feb7413f7134371f62c6cd0b8263470d0ba0452e82f9a1b328278e5273a0aa10b3a8788ddd6d25fca7cf9bf7f1d271293885a7ca0e13bbfba90406ee5ac96c28add0007dfc422ad3e166b7b14ed6a7b4f4de78dc1d1da095e9487467ad4c761816c59cb0b6baced621dd58043ebab3f405bc4b8aa546a89a1d88430db16418a0f35019c642a23d5521ff4233957f2f7cb469d94ea1922947cb13da09f514e2d0c6b56d"}, {0xc, 0x7, {0x1}}, {0xc, 0x8, {0x3}}}}, 
@m_skbedit={0x6c, 0x1, 0x0, 0x0, {{0xc}, {0x3c, 0x2, 0x0, 0x1, [@TCA_SKBEDIT_PARMS={0x18, 0x2, {0x4, 0x5, 0x2, 0x80000000, 0x1}}, @TCA_SKBEDIT_PARMS={0x18, 0x2, {0x1, 0x5, 0x2, 0x3, 0x80000001}}, @TCA_SKBEDIT_MARK={0x8, 0x5, 0x6}]}, {0x7, 0x6, "e597cb"}, {0xc, 0x7, {0x1, 0x1}}, {0xc, 0x8, {0x1, 0x3}}}}]}, {0x12b8, 0x1, [@m_nat={0xd0, 0xa, 0x0, 0x0, {{0x8}, {0x7c, 0x2, 0x0, 0x1, [@TCA_NAT_PARMS={0x28, 0x1, {{0xffff, 0xfffffffd, 0x4, 0x5, 0x5}, @rand_addr=0x64010100, @broadcast, 0xffffff00, 0x1}}, @TCA_NAT_PARMS={0x28, 0x1, {{0x8, 0x1, 0x7, 0x6, 0x80000001}, @multicast2, @remote, 0xff000000}}, @TCA_NAT_PARMS={0x28, 0x1, {{0xfffffcc4, 0x8, 0x4, 0x4a, 0x7ff}, @local, @dev={0xac, 0x14, 0x14, 0x2a}, 0xffffffff}}]}, {0x2d, 0x6, "bc6b8bf5aab2e78a7ad21549aaaa47e86907c349180305192f884edef76ddaef91fb29959039f9d932"}, {0xc, 0x7, {0x1}}, {0xc, 0x8, {0x3, 0x4}}}}, @m_bpf={0x1090, 0x12, 0x0, 0x0, {{0x8}, {0x68, 0x2, 0x0, 0x1, [@TCA_ACT_BPF_OPS={0x4c, 0x4, [{0x7fff, 0x7f, 0x6, 0x7fffffff}, {0x7016, 0x50, 0x5, 0x7}, {0x20, 0x7, 0x6, 0x3}, {0x0, 0xf7, 0x40, 0x7}, {0x3, 0x24, 0x9, 0x9}, {0x2, 0x7f, 0x2f, 0x3f}, {0xa77d, 0x40, 0x3f, 0x9}, {0x8, 0x80, 0x3f, 0x1992}, {0x8ae1, 0x3, 0x39, 0x4f9}]}, @TCA_ACT_BPF_PARMS={0x18, 0x2, {0x7e0, 0x9, 0x6, 0x5, 0x7fffffff}}]}, {0x1004, 0x6, "d5c397cd6a05dfb9ee416af74a320149463645396597a2b5cd06857b94e64883c377191a7447f76206cbf62b17395679d6455a77d466b611090105e10df94939e349296f8b25ecdd8b37b4e38dfd6c40af43b5675aca6aebfcdad96640c38eecea90c6f401e9b1ea907b505fca515af7b9dcc62128b5f5a2ee0b923e73becb7b974cd10d12ac9313cfd146b4eef21c3c01dd074411543a973f53954abca2895e8d560147b79b9046a925bdd3d40ef8226dab1b40ea5c9e9988374e04011a06b012c41a426c2977702f6a3d8dfc2afa1fdda03e73f2622958151834764b2f447f73b9fe97f131a96d248594719da44dd61c2fb5a7f627c7042ff2de0cd46a0a8b15616c560f13cb83403735eb433c5e17b8d8c8b1fdf254beb05b1b5988465b96da802f1f6b32346ef1a83ba4ac4088166eed1a6dc19f4f069cfa1845641c9da70f7a684b46b6a8c4eaf1888220bf7a6a830d2deff703e71afa1e44903393834c75c20a296d822f70cbcb20d4683789eef42cbddef50b652ec5a24645b35e8fd5b9cda2b98f85bd786fececa8d18b3cd7d74431efaf42c08a0f98b0f1ea5ccd990f11b81a61a8cc0173b6bcf2449cb7530b3a8faceefb5161057b29db366cfb1b02c0bcf834b8ee7c3d5a22e137c1ad7d17261f40bcf15565b27c864d1f388c264c89c1e5f4c0e70f396afcb190c57c8a83e42d6253bb339feefb786696b7679c0846c55c86097daaa948a9cc496be08a95a2e51a96f8c4efd2195102f8f020d2b56648309aba5b2494966f0b096961412506c02616fabc1b523e6e5138d0629ddaa0ab3af3b3ded3c27f1317c4fb864c2dda0fa800814b36b1edcbbd1dee5f502d1bdb98f4d7bcd65b65158fc235ddc982bbfb70c10f38fc765146b4bab0ba4d650d463125ec6d6cfabbce5e7e0ad7661b1344771f4696c67a75cdf9a11bc4202100b3bd149b1f4cc1c47b3eef569d7cdcc57913e5cc93c9dc4d1cfcb5d288a1e3d22f9095a0e5e1dc56daf42d9163b5f7c8e1e5ae295add87d098959f033d44cae83332ed4f042536f930753f3b758e0454cdd23a2e582763cfb5b95319230ae0842ce4fde301dcc5acce4d748fd9d5472486b0b925a305e2f74caae64f1cf34fafe551430e68f45a10a64e2cf30ffd72f3da3a92c412703dc90dba3a4ee9d5fa249eb258c068e0a0e6af0bf686a67ecd9048a262bbb70ec5a08be882ce869bae99311ebccb048b6de6d80efa7c061728f6ccd88b48956d3176be9ed51eccb849b2da66a2f57e29a5b2f5f2f543c7283b807b08d075f309671301337a1bb2e6e0845f8b2dc80a81b702f7eeb81c85482e4ef0abc0fcb61a6a0554afd272a3133fe6efe695143c5f94fc2185333fc17dcda0b1249cfbceaa84229c8e52bd671c829982716a6ba2cac973786a20dcdb1489fcd7e375aeda14bee7ea1ad20dd485a0bc6e2f7b0730ccdd1fc1f9560a6d2f446f311adf4e440757ef8805b970aadb96041dba69a7a1e16004d9846c94cd33196ff0a14299731b70e15dc686b91a5c8733d9cefb7ad44cdb397a9e7fb7d86db92786f16d8b6056dc194d46c25d23afc493edceace06560576f6cd45462591c3e93
b4c71680bbd6a5bee9d1ff1a4ebb7e71266793e66d2a03a182c808375ff1be0ad8f0615e11d06713042ec257e0a2c9e6bb6fbbaf48b5887a2ac52e0068dfc1ef2d5f0c79febb6e83c014dc176cb36409e7e4837cd2117f7f9517e4af3c26ce3a56b0f6cd10b3b1dab1dc35ee8c268b14d2d7110968c96653801a992a238230c611a1b700311c3f976c1a85e5fcfc153db9ec7f95fa9bd83a25d971708ab14a19cf53fb0ed00c19fa1b77ccdeab51027300c7bac430215e32e3b5961d26a1a477e9cbe4c38428b1f89a7987a9d9971c4da1def147beaa14a0579d01fc471eb0550513fd53a1371b90f3cdb7a3c1b8f2e60c84d56f672502ab66c2d69c326bd34b44a6f450e64858a41953247056a8cc23e4f7d67f7515e64c0c16c40e18c0ae58abb92ba557aaf3115bbd16f10c5a165cbd31b1c93fb2b7171efab569d6ad3211891d2a73bd0e551363ad93b59c1679f20ee92b10056ca24792ba91e22f95a931e2722d19e904883e1e6a35b16786d8a9a1f8a574f7b666a6cda61ffd17c7a7061da34d0412cfdc6786fb354b1004068f523e6ac282e91f368525370a2e2b4b05e879e935b064a12de9f84c1583c99839ff6567386e64093a7f4fb68d5b79f2b4979cc2896fb544fac13673fd39a83b27e95337868d4e9b2a62ba5662be6293f430178ccb5b7c92ffbb2aaad681e7db1e85811a2e207e56dc8949d725d206282300ebd31a77bf158bb020328178c86ff005ad4e9531464c322886e8f1a47017bdc42790499ae5f7431293cfde6ba0f48826a73300f132e46dafdb74ef5fc6829c4e2abc111f1bcb055ee157d9189b4120e035fe90d3157dc2e8b710ecd1daa92dba256ae6e7aa2b8b1991d5df9dad07d09875599a43eda50b0726a195a32690b19a0257b7455fc96ad590fe009fe0ae6275d95295ba9b4a072cb5fb9e2ad2f433ef5fb7687ef16ddf4b613da4be923432ea6cf6bb472fe43644cea7893b5203efeaf63278ec1f3fba16b53e42b4e68ec48ba8a62a53d508c5145a6097806a6f4cc9a15469a3cde50844e8a0525ed3a04de0afad876733b6b8d82d9a3488cc01ab72808feb3a3979725355288376d57929eabbb0043bd6f1f841473d59221d4059bcea70d3bde23fe6abaf30c189c0bdb99bb56e4ff4af854ee1149017fd1bf5fa9b24fbd4370cc2d73b677f36a751ac8e21239a243622c732c7357e8a44ece685c6d585e4d82cb997160c2766949dfd8e2590a4dffccef738a2dda0eda04c06b389c41ab04536fe00c81d18adf0b9834ee24bcb9669fd77e8648bf9d5add2f237809f4c10a410afadb4bf6a98b4a4c5021d1a6fbcd52039ff9adc446c3a67c317a41785dfbfcf6e02a17a7f0f0f14fba4e8906a292e52c60c6136b2550a5bedab67d9adb9fa2f3c8fc04aa5719b87fac18b3363d753c3046a2e7bf2ce7ea9d989c6c9e55deb0861a0b025421efe20060110d9b9fe4f4473f8c20ae283fd688911c6ad451fb980d0b22eb44577015e452aadf471fe21590da1a10111ee241db4eed02881cacb87ae40788beb11ffd83a7b7bb6521b538ee94b20128c513cd074ad79f43d11e34cad7752fab6f653b4ddec32156ab72a255ee11efffdccd6ab5bc2b5b6da3fc4f31209cb71bc0b8d94d52d5abdc40ce329a4aedc3d0d77747255d1789f5ceab23402d5c9bc641270eef2d36d7f30202521d39764c5ac3c725ed17f278700576700ac52906fbce21a41385c8cfeb2c8cc65da752842102236a0c695a63f47b088f7bd9c9c60365136b8be6384b96e675444376844bce8d0438fbbe921429ce1c8c0a34fcb6b20b161b3dbc3f5bc852c8dcf3fa56e58ae7e70010ff0f74251a61e8eff8aad004d976a8b2c15ba6cd898f5c245e50c8f4bc5e170ab0ecb015e14cb06625189425470c2c56a959e7eafe8fe823f876d13c073dd0183acfb0ba18d54cc013da148960227b6f95f682af879e7d544eebe8f53d914940d9ad6a2e8289a690b0a914ec0878c9465f7782405cfee4a42f31e5c89ba27838f71515692fc18a500df155b74b4c8200077f513c3930d7966c90b4747a8fe5774b9a8f363084cdeb9e9a48c2b40da6fecf610cb66fd255e384e2f315d5d2f20f4c120a7aef7ffcb66a39340f000dbb9eea8e071d6b0adffe022279535e42329f03f03d0ca127804f1eb28af549d740891d80490ff710201c03d2c0fd75a1824ca80db7a9c45fcfbd1e020ee68be926ca6f780626c80f9e49b60c05f5557f2583794ee6825d2c831520530b200e74ddc3236422fd8b1d8cf7090cd7c10d21a9b4e315e83dc9bf15f23f0acb032be93e3029485b509f1d1c8c4c34784cc936ab1c8bf0dae4ab2f9b88773a30306543759bfba8e47a10981a70bad0ca33ce5a44183351ff9efd1569c8abb7e096d899361fe0a27a471e58a01f120f2a1adb0853dcd7a6b3224a7dfbbf1b9c3c13e9b5cdc43a6ce13aa8e2eabc0d7e9a25802ec3532a2adeb0b2af0386804c0ec3cebf1044d4e7fe
4b39120a37942e0a0b76472318b67483d5c89e5afd5a9df8ba8a5d216fd5dbe33f1ef344b9c56b1d4ebc6fb4688b96005e86772af5350ace9f2f19b5180577e8e4369aad771fa2330109e5e58024d37c712f577b68037c1f6b18c10b1bbbb4e5dd07a16ff9a97c21983437709c65379de11a1727b1f3cfbed2d7286f55e43ede4b0696a310491485b2ea2bfa3d4f9d6d75b2780d05372c3941b371f97c6822ce5473f753e9e9402eec9e8c5e2678bbced5c6c10e3f995ef6013feaa0ed621b4cfe2ed549623f8ce3e7d4b8a160efa15c931c6b28c3e926988a4d4677f9d36a464ac0c7c24aab048169a52306a761a49b73b44f83ab65126aa29e597512caac7b2baf974322b87604aa6636e81759fa411e0cc3721828f9d5455808573a8807a0bd06a034c79e1f12734a6fd47cfa650213c3dcdee3540e3dc15c536e83d9984c6020ec6aafe8382094621128f13a1ff2d854234107a043a1431ec584c01e3bf0d78bff558d9a8cdf6dad5b500f0be8c1870487e08ebc34616b6f84c09e2cae4795014a232b84c4cd247469650763abe8a29ac653c122580b119c10f669a545b4357540c1b3114d219ec0d8f562bb5eb51b70f71c744ce4703d617c4f818a9ba959f353c8174c61200332d9af94035a5906ad34c7786d0a900fd102f8cab93b01fbb038f774f8616351c0e862f3eaae5ef17a5c503974db34f6bd2abef240562eac1bcec04e7c2f4628fcd271a874ca4b6831196ffedaa91ced339c46d8b9d4316f66bcd284274314b05dabd53ed14b39eb3af26f4053ca22fc3918340ed9eb9fbb06ae98ec61ee23cdf01fa688bea848ff1f3462faebf6fbef438255fde9286b2bea6e6ae525aa5f279a31ee3b349201c975d6fcb01b833759cadebb35eaef17d07a5931b5e9c9731ddb23ed5c242cfcde0c2c4f68364353ec4e29817a8f5c801f3d383d7373555914f771a11b3d11b6e38cf0b8c53f20a84a6fcbf4a127912bbcccd0f0c5e07e8ca464d208b4c9288cc3611b05bbd6fb44072b923f83d2d1955fad5055109cf2bd8a3bcb30386816b0b473cd4e2e9131a316d3ba64cb26b8f2c2f8ce463989a3c7d3831b4137be1f2bf57d1a0e599d72208613adcfd3c3b1c86a4d7a7baf8bce90e195d50d368a16675dda9f70cbbabfdd5206d595a8a92795fab2fd3d0a54e533d82f3fbf95b3655f440a8a49fdfecc80c2176532713539c3f89652571b60f09162163b99afe83a3677ef6c8683ef0affa72fe16c7632d55f0869aa9e8acf3df681c99f4bb59afd02dbbd593c95b4179adf6149b7c6e4708d2dba66dc279056e7b5fd672abb9a7224e4428e4fddc714a867da8c41cf572df942d2d27e43420e9e5595cfdaf089e9283034ad5facfa41d9ec293560255300bd9e1d84468a70942fc87090a9a33fbb843026012177789814499b3234e58e2c5c360535806f7bc2d063ffc92d1029ebd463ba0542931df9e1294fd29cdbd367691f4935215abacd75635fc331c33e1cc875af14f2e8aa2bde61e49eafb6618c122786c204792d2e11ae4a69d98046f82900570336b1e599ebf102de320febd2117b12cac8c61d34f58d94890db73b70a20501f65131c47dddd50e6d1453741999ad24c1e0add6f3cb5d42f5204db16cc08a0ac8faf244f23a3f99fdae2"}, {0xc, 0x7, {0x1, 0x1}}, {0xc, 0x8, {0x1, 0x3}}}}, @m_ipt={0x154, 0xa, 0x0, 0x0, {{0x8}, {0x30, 0x2, 0x0, 0x1, [@TCA_IPT_HOOK={0x8, 0x2, 0x1}, @TCA_IPT_TABLE={0x24, 0x1, 'filter\x00'}]}, {0xfe, 0x6, "1c99289766349766d6fb510168184525fdeb9ca5cfc8cddd4807de1f21b28ffbfdab168537494ccc1ad0bcb4c5d61f7450fd926be0abf90240d303fe47a44b155989144aacb23019a9e560b58875604502aca3a4ee10845fed4a1059f4704a4c37c65c5f4358feb67a61a3d24d717dd224085e4d4f095aad044e568af35003949bb11de9d3cc065ad6d6f0c5415f01f17c78e85211466384dcb192b646cd178f3eaef8f8da5ed00555f73aa1b939e79eb77743ca59dab6da445c222e8adc9654767838747d34fd788da596f4993519af07eb283c444346af4a9fce0218907a0332605feb938b2b8b00e5c66d1f34d3e1a6d8d7bd931527087bd7"}, {0xc, 0x7, {0x1}}, {0xc}}}]}, {0x504, 0x1, [@m_csum={0x130, 0x7, 0x0, 0x0, {{0x9}, {0x100, 0x2, 0x0, 0x1, [@TCA_CSUM_PARMS={0x1c, 0x1, {{0x0, 0x7fffffff, 0xffffffffffffffff, 0x80000001, 0x3}, 0x3a}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x4, 0x3, 0xffffffffffffffff, 0x400, 0x6}, 0x28}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0xfffffffd, 0x2, 0xffffffffffffffff, 0x7, 0x408}, 0x20}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x80, 0x1ab, 0x4, 0x1, 0xd78}, 0x2f}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x3, 0x2, 0x5, 
0x7fff, 0x5}, 0x17}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x5fd5, 0x1000000, 0x4, 0x7, 0x43}, 0x2d}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x8, 0x2, 0x1, 0x0, 0x7fffffff}, 0x68}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x2, 0x8, 0x0, 0x7ff, 0x7f}, 0x4a}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x4fc7, 0x2, 0x701d22f88279bd7c, 0xd811, 0xfdf0}, 0x76}}]}, {0x7, 0x6, "a457bb"}, {0xc}, {0xc, 0x8, {0x0, 0x1}}}}, @m_skbmod={0x17c, 0xf, 0x0, 0x0, {{0xb}, {0x5c, 0x2, 0x0, 0x1, [@TCA_SKBMOD_ETYPE={0x6, 0x5, 0x6}, @TCA_SKBMOD_ETYPE={0x6, 0x5, 0x400}, @TCA_SKBMOD_ETYPE={0x6}, @TCA_SKBMOD_ETYPE={0x6, 0x5, 0x4800}, @TCA_SKBMOD_SMAC={0xa, 0x4, @link_local={0x1, 0x80, 0xc2, 0x0, 0x0, 0xe}}, @TCA_SKBMOD_ETYPE={0x6, 0x5, 0x817d}, @TCA_SKBMOD_PARMS={0x24, 0x2, {{0x5, 0x1, 0x7, 0x1, 0xfffffffe}, 0xc}}]}, {0xf7, 0x6, "469adb2e2cf41b85321c7b5a23cfb528ffa3f286d09af62de4c0437ed17e4bc9c0df7e91f1dd947ceafd1b3fd4482281e6b6beeb2e830983a1d8fcc1165bf74a9337c5d61d8de7fc97a7a88ff350763277657a4d4974eabf8c6afb00c159e410f71cbf97f49ef1d6ec8595a0c901694f158703dea8683b7337e77c54fc95ecf6fa59f7ecb82e4bfa7de9c4f19d7d17cc0aac8b5f4f27346cac83a14371745ada93c9389893e54022226ce33f522e2f30a08bb8f1e2d086a3a41dda677a7b90d8c10284d939365e627baa0f229165b37855d9075ec9a3215620d0e26e9c81a5168e03546833ea5e89fb0ed7d07a34f44df43233"}, {0xc, 0x7, {0x0, 0x1}}, {0xc, 0x8, {0x3, 0x3}}}}, @m_xt={0x254, 0x1, 0x0, 0x0, {{0x7}, {0x188, 0x2, 0x0, 0x1, [@TCA_IPT_HOOK={0x8, 0x2, 0x2}, @TCA_IPT_HOOK={0x8, 0x2, 0x3}, @TCA_IPT_TARG={0xc1, 0x6, {0x1ff, 'nat\x00', 0x1, 0x7c, "a36265120b7e186ca73fc759df8344fc2da4c0e16638b260c37046bda260fe3105d6fa10b57dae25e049be42891968806e38241feb33b5ba929cfc8955b9b9e60bd0826593b77951e9dccf0fb6d938c12d6868dbc6ab182fd31f0694b458c7ce30ce63bcb51c3d90c82cd09197ab7fcb15fe2d30fd649858f9435da9dae330e353d73cdbb61e05ffbffd05c881b3e94dd4e1ed0dd0fce0"}}, @TCA_IPT_TARG={0x65, 0x6, {0x6, 'mangle\x00', 0x1, 0x2, "500437c4c1b7f744d5efe01b98bae1391531f0a2d3e85606b333f4bb365f02efecf2709598cd6ec282a516b504c7da9e4d30e4cf60aa9b9af4d7da"}}, @TCA_IPT_TABLE={0x24, 0x1, 'security\x00'}, @TCA_IPT_TABLE={0x24, 0x1, 'security\x00'}]}, {0xa6, 0x6, "e1d0c5568381ce0be90c8f727e47e1c9f19320379a4aabc4987eea4427f9729077f4ef598275cbcdc89aedc6277c313141bc107d81df8f22e26a77fab298edf9296c402840d753aff241a7c1c33077d1ee94471d8d7f900399b87efd3b6008453a5202861c54850aa152286831a718f02d04168bd0185fc3cebf3aa93bb1dd5ac99520a71faf5e747d3aa068e5e1ec548e3054a433da1c9c822337599fe2f79c3c15"}, {0xc, 0x7, {0x1}}, {0xc, 0x8, {0x1, 0x2}}}}]}, {0x16b4, 0x1, [@m_connmark={0xe8, 0xc, 0x0, 0x0, {{0xd}, {0x20, 0x2, 0x0, 0x1, [@TCA_CONNMARK_PARMS={0x1c, 0x1, {{0x4, 0x80000001, 0x5, 0x1, 0x4}, 0x8001}}]}, {0x99, 0x6, "03aeab77c6b6512230bfb4a4ac9b43e3dcf0d450876d33959dc776e07a78196e3269bac6d8589a252afcf9539f70af09178c9c32b5d5968d9a4b0573f10b524b8e4cd19caf58fd02402025b05fd8e46fcd1c94c85b0dc75fe271aad7cfb111e47a70f5dfffdf94b6992422f368abc3fd15a707470c933185cb19812aca83b9611a926a8937eba6778a011e35bddf74b85861922719"}, {0xc, 0x7, {0x1}}, {0xc, 0x8, {0x1, 0x4}}}}, @m_ipt={0x23c, 0x1e, 0x0, 0x0, {{0x8}, {0x174, 0x2, 0x0, 0x1, [@TCA_IPT_TARG={0x10f, 0x6, {0x8, 'mangle\x00', 0x81, 0x3963, 
"fdd7875d02135264942e2e13ceb04c4abfc1c38223b7750262be842e76f650d9a8d6814ee8c83b01075659f5806c9bf17132c3f7898e32c08265853f9a5f37802c4d62eea01ac9c7458c96c094c6c161f3541fcc90630c5422c67169acb00e8bff2a268bde93a0dd5589a7f9cf35f5839961cc5cbf769820644ae0b07d94e45f23902b5b3ae7731f162f0144e8bb7153f0ebf787387e3cfcecb4fb96e749c5b849e0dd4bc4ac4647692857192301a035d92651455bbb9359345f10634e10d8405ee8049471621d9735138ded1c7fa59de5f150da3606266fd3e8fe91f90e032e3f0eaceb73"}}, @TCA_IPT_TABLE={0x24, 0x1, 'nat\x00'}, @TCA_IPT_INDEX={0x8, 0x3, 0xfffffffe}, @TCA_IPT_INDEX={0x8}, @TCA_IPT_TABLE={0x24, 0x1, 'raw\x00'}, @TCA_IPT_INDEX={0x8, 0x3, 0x400}]}, {0xa1, 0x6, "ba7efa3f282338c36e76b0cac6a7041260e6223343731221c4739f4de0c089b36caded50142fdde413819c12a936da64fc0c8d9cd8ecbc9a1a501aa5a6f1038a90793b3c2c440c0b86651fd1b5864446dca51e15d0fd81734bbf6bf6b9322003d8d102a54af20085485c9d7bd138667551a0ad46e3ccb501125791f70dfece56fae62212400d45412bfa98882c215c077793b9ed54d28986000009d854"}, {0xc, 0x7, {0x1, 0x1}}, {0xc, 0x8, {0x1, 0x2}}}}, @m_mpls={0xf8, 0x14, 0x0, 0x0, {{0x9}, {0x34, 0x2, 0x0, 0x1, [@TCA_MPLS_BOS={0x5, 0x8, 0x1}, @TCA_MPLS_TC={0x5, 0x6, 0x5}, @TCA_MPLS_LABEL={0x8, 0x5, 0xb315}, @TCA_MPLS_LABEL={0x8, 0x5, 0x30021}, @TCA_MPLS_LABEL={0x8, 0x5, 0x8e9ea}, @TCA_MPLS_LABEL={0x8, 0x5, 0xccd5b}]}, {0x99, 0x6, "d229cad884cae9affc3174e07f4c784fca055c4bf8d0398be5a871d4e5365bafbe988159fd8ae79523f2338ba4c88ae8938d8da1c1723a1e54156c1b754807c8005c931732bb05a2c2b5bccf6109a252c0993da752485a7bc9c8396c426b2a6dda2fdf0766313b40acb6c878394c31449a01ebda859adb01c0c15ae91f757cd307f7c6417699fbefacb1186354412a630b4387515a"}, {0xc, 0x7, {0x1}}, {0xc, 0x8, {0x2, 0x3}}}}, @m_xt={0x184, 0x2, 0x0, 0x0, {{0x7}, {0x12c, 0x2, 0x0, 0x1, [@TCA_IPT_HOOK={0x8, 0x2, 0x2}, @TCA_IPT_TABLE={0x24, 0x1, 'mangle\x00'}, @TCA_IPT_TARG={0xb1, 0x6, {0x1ff, 'mangle\x00', 0x7f, 0x9, "9e56e3ac8ddcda53596e90ecc48c78d65eddcdd5ace92ae5e2b159c3087371890895391f0002ae841d5a894346f1f3ecedfb1ff4a73509e7ff0617ca4f0f55c9ba55022f2cafb884ca418208e2d2b6b0da84e1b1cf1fdc132088a8eeeb33a00729548079410f967c5915754f3f11ad6d85365cdc17dc726f47a66736fb7708ccc3004c8c5b525c"}}, @TCA_IPT_TABLE={0x24, 0x1, 'security\x00'}, @TCA_IPT_TABLE={0x24, 0x1, 'security\x00'}]}, {0x32, 0x6, "655222f21dc8c3c3eae5ba5ba36a4a84581afb66b18d7a51e0154c419483f86ce93812b0483f728d2fb8f059455d"}, {0xc, 0x7, {0x1, 0x1}}, {0xc, 0x8, {0x3}}}}, @m_csum={0x1110, 0x5, 0x0, 0x0, {{0x9}, {0xe4, 0x2, 0x0, 0x1, [@TCA_CSUM_PARMS={0x1c, 0x1, {{0x0, 0x6, 0xffffffffffffffff, 0x100, 0xc1}, 0x1000}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x7, 0xffff, 0xffffffffffffffff, 0xff, 0xce}, 0x70}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x7, 0xd3, 0x3, 0x7, 0x9}, 0x61}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x6, 0x5, 0x2, 0x4, 0x3}, 0x13}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x67e7077e, 0x2, 0x7, 0x795, 0x7ff}, 0x4b}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x8, 0x2, 0x6, 0xdfe, 0x7fffffff}, 0x1c}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x51f, 0x9, 0x7, 0x2, 0x92}, 0x3b}}, @TCA_CSUM_PARMS={0x1c, 0x1, {{0x7, 0x0, 0x1, 0x0, 0xb4}, 0x7}}]}, {0x1004, 0x6, 
"a4f6744c97e0549953a6b9668c2ee5bdbf8f6d8eb58f0b6f506dfd7f25f826d6c616ee0326543d40357ca355c89495d320bb884bf4e3a5036ca4301aba26ee893e14bb0a33047f593a0e715215ab31488849a15db369778d71b15dbd6da53524468c5a59dbc31f3714a742050e8e42eebc527384d37f9333a1cd74d02419861f8d5f4fff43cdb70694cde1c976e9ec4312de161f8872bb87ea486ccacd528b495c13a6f054cbb703dc38594629fee566736739aff2bd5d82e7c01d3bd4e8f1e5c6c68baf7d585d0da2aaccf3a77e87ee8f7b01e078860b3f562dc4b16c19a69a01b2b23faca1d1418ddb5e029faf004d2e08ecb26f2f569853214465a6129be99d0dbaecac718a38aa6e0a4f026f8dfcf3df96d356369abc2b3c0d1465757d3a9ee20fab22069f4a846990c4c8779a209c9c857c97b367448323d0ac3ed69cbca5ebb86799865420ebe3e0828cf3daeee8fed764d0bf1466fa87c008ac4c0157493de93febf8c6991ca2f9242b2ee5adbd660621b940a5426cd983120e69d79f993b71aa40ac0655b262c5924b3cce92445f72537cee84a81c5d9f4609ca8649e94f9dd8610dfc6cdf90ebc2f09ccf5c4eb3bf3c60d35b94045e3e05b6610b5e551689e923494b0a6f0ff64b8e536e26e0458f1757ed71d1d9f6d176ce3c23e0cf52275cbf8d010c04b9c41bc4ada9f73fa57b22a452c38ed7f659de11d8f281f5bdb9887e7a36e84f3cd33a39c4159e8b67d0b7a64fc8b0ac831922256384b61dbf0adb4b13955a97a4967673c003a2bc269d573707762e6d12d1e81ff3b1cc7242c1072a979353661ca4a687fd43e16983a6eb30b298427770c8c337c0e87ee45d4b6f442558670db3ed4d379109c1e6b7cb9ecff66816061a221f1c5104abc4ac6a2c76f680dbbf102f4ba7a33df85b2187658ee5af73f9e37d7fb93ac50da6ff6aa558fb48beb68eafb5f23a120e938aad5042d99b6d1d3c4e586825c115e9a90c9aff7f0b6f3c46634c93f84fc8b1c89a5c69b77cedd8e3e01a641472fc863045d3e50ef3059ebbc04760bb7f35bee571afcf7a14ec547184916d4ff6050be595d0ee5b569fc7313d83dcf73e65e92f6dac45dbfd1da86c78a7232aaadb6416164e10045c6b5a7977c0a1ac5213311f9f82625652eb875c55028f1a5b05556e6649f809490309cc1571972040e0d45ff53137802fe774ab814bb823e6e0ef236f535cbbea42b082bfca32c5bcdc5e7b30a2a5dee183a31ac852c2b4cba8a3f562eea46f40ec7f97deeb97b671a148492801e0dd3b04ff8fbc6d22435fe2e3148920ece2069b56da6509739328ad0b7e633a646c62424576965b01f4925b4c7cca98480872b81017617a874dd37f7cefcbc6dba5c27748bf5971b66b98e9032273db086f0c2e4f9d752c5633a6b75bf983c1c169b710633e02c911e60468855bbed3bdcce6f37d4033752e3cac45b9ca22f6a449ef5afe0536f3e4e855937c721891f591e66c2d9fb807fb91518b20f42e6e3b81d535612bf046fdc48c2165597b5e0f636357a54b771aaf38119aaad0900a9cbb9721fa63aa274989a1369e8ee873ab6d687f2447e5ba10cec17247bfdb15a2256b97c6caa5e52eb77bda9b68577bb53a3879283a913b13c65788ce414d9af61a9daeff517ffede4882e4e30a5153997431a121a436436a6525c8668affb9d284257d0b1e9d9dca5c120d0b57eda860bdd370f533c3ceed0ab57812181a9a8eb11f79d233367dcb8cf9982be61c41bd5c808287780b0015f463f40a8a8801c0b8e30511cd1d2f661c414db7c59edcbd09286d9e13f3f40424f598f332ab01ac583a0430cb989c9bf05195eec6652175afb92ba16152caec34b0075c16cf8248717ebf4f0db91c1c6d8ed8bb65c7147d2f8774f1227351027cf94a07d7094bd92450b7c82adde469a63e1c8040a07e66f35b5f5237e7871e87ef8a650b3a2e4fa88a11ec9cef08e5baa926885e0dec1afe90c6103e8a469181cbadf93aebb9d20726c0d57e3d21556ecbacf6348b45a43f1da77b14c2171e1f285744d83b88d3c85ba54ca3efef66514514ecd5dd5c75bcc7d500cbe9d062863870883cf992864141778bbd868683b8b2596e7507bf09cf9e9e10b11ecd333d2d42e254e8fc1ff860801143e52d4475d6460994ab74e4c75339df77d691c28fca4ccb1b000348204c6ee4356ef5e9977f4214bdcaf40a1f104a1b914520ea6e70ce85ba8c40c6382702ebfbd30835851db429d0f1d8f2549fce5cc539104fc225e9d13f27897e3440ebb2f3ec843807e57ea6c6aac617c017cdf8fc9f0b2016ebf8e6a23645c8564e5964892acc6f731b6bfd7e9c2fbf3614b7b0a4a4ea9a3745ca1bb7cc496ef034f2f20185870b1ea330077599fb1d355fcb0b4c98ed5a6609767d873f28013c61180b263b0ca0bf55c6e52c4ce7f63c4880ed325dffbd28c38dd4c8eb7f82e0548d9fa5b01787de60d3d9e0b8aecdbfbb69b21cf9994cda40819b
bb10fdd9a77d9df090affc15b622114c33a88171509a822447874f93693d3cd0094e7ef909455f577d14a0edd49cd5a3b8c360c11b9041a1a8035bff9f10925a2f5759a62998e27e510e11ace3079cb4a2fb4a9c47baaf7af7fbe9fc987a283eb2d1b290d55ab19b7af4a02c9010ddaf701be592a7a1defea6e8f1a82d9fd8b152c7561e51c4489990993afc72acdabe4bc4acf48182efbbb6541700207b7882cc00e4a03df564c569c0c058290f012fe2cc4978f5642514655287040469aa0810c0731f6ded27f6e931029398838ab61e06a51b5c9bb44ff2ad5a94904abad2205a9b8f814d341cdd3b43b9664e4cece2b4057e12a664cb2aec4298528b5a0ec5998ab5f37f90e7ef3d4f08188737b62601b43df74ef8b2b3a7cd1b3e0e66ea88bc7b4acdf6e7251336eea1d97d2a12ae65c33bc0df452a404d7627264425119d4b4aa202931bff272e8614b2f87e0861ee612844c8a9949c1f424fda25e9f19c9f1b42fdc8ccf35f25405ed3df83bdea27b67ae40e3a28710bb72ff7dddbf8af746305de1586146c9bd1882870b3967b47d1752f9ce0d9dfb183a86c4032944b8c8be51c836deb8ebf899c1ed4a44873a5f239400b5229ff7a10827e12e041576b4b7eb9e1171900e98a8ee9831071b2d6d833373be52495f452197e90692ded63cffb6ab5ff3e7abe1dceb0fb47386e4f1ad2a439fdd8c0c7d67bff306a04b92b22f6b205986550b2f7603b8393cfa8c0b7c015bf8bdd80dc97b5118f04bff54f03f19f105a1eeebf478d7005f493366fb2b8924de41a8313f416646652f2fe2ee7a38f4d7eebda58d55488f91e05c94b18095d6b4538f9c17b615434943a185fc808a6e1b839439935f971eb721c991d106cdd7fec147aafb8a99c10b08c0b6921bfbf4466b73c12f98bcccc8194bb10357f9520b62e17c89b7198bbcae3edd5edfb0e121a32e8f15d66b008f4d48f9528d29fcbfe65e678bb22164713571a2635c893f1a7cd1ea0a7a1935bc2505c3cfef22097dfbe01f192b82dc134623809f529f87fd576bdcff1c4c5584fb5f19958507d5970b69291bf3d49287d143853f057c708fe67998020ff8a01604cd003a4185ad819fdb7d8b7fccae68ca3901be025ad3cb5740f836f30399c4623c69a6b5f8c535f4ccc4854d8d72bd814eed2ff3305c66ac155c6cd5a65b05334a704acfa4af419a9d71cf028e0e208e39f54010c89ee53357648c994e88ceabb3ccd9dc0579f936ce011b57308d8d5fbd91567de62c4a4004b68509127c6225ef2cfa4e19e056ea605b3391fd0f1de1edf05b23713b120dd5d9edc4e6a17c32cd24ffd31c3b7a6b7512b40378c99f99173abd8269b6cbe5d8a26d68f029a04fd7fd0a01ad6ce1a19d8d5cf9e7f6e5372c4c041dd07a6ab4c59d37abb28e157b0a90a7f24c6d859b42740785e04f7ec9a417d7e74d4eb44ff751c2edd12ee70f1b92b8d30631a31fee1aa039e126b42b7b81b815df972f11ae74266a080da573e7a684f186093614a69619ceec1a3c933869595d5158b91d90bea1604d31f63466408a4a36c029e2b892c41cd8bcfb40c5fc9fa5fffe3596a5ccf7d2cac73b1b1a4561416cc15cbd4110dfedb23ed4bdf0a3fbe3cd1b1be812b289e2679fc6b0c6b8ccdfa2b47de27133f7fb4f879b9208cda73332f1df049feda08c119eef7a86af80f10a259e9199ed9003182fe52dc8c9b57a4137cf0e9508625b123ffb693ae8417b5a58780390d42d0729fb2261066a090605e3c85572bd2609c6b01030f82f940d7fdbed5c314f5441276f2c4458596058d36e10a41a585a72866f9cb7c306a4f1e40d3502b77303e442525458a18eff321220be7509e884c97f9d4bc9ff4b60da1f5d930f30c79fa9462b43c90486f924797350a1e1a5b2562471058bbcddfe6132f8641236bca285f2c51339c9a54606cac6b6a38aca92bccceeb1c851004e2553819e0114313cc4813381741cd857bc253818adb58813e37776a11117a769f4a9030fd8ac980007551511159df6ac0d2c0950834eee199e4654cad602539d5bd2749c7c866ef02e8a421d296fc6b0498fef68e6061686ee9be6cb98ea92a5cae43a6e8d1fa05fa2d89f8fa8fa81662797a4988cca25dd4f739c21019df0b822f65cde38a0a65f1318e491fe215353153881d9b3262f143af2a391f979583c8c945dc985b6de5bc34c1608a3f270890eb8aff31da524ebf40de2a22c55ab90acaef3c24c0256e27cb064d46815f36941548a1258ac786352275ba65e69ac794581ff1ea325f93b92a5934ab9a384c54bbfe7485b74099a83a4d9d43edc82784c387dd694ce3b0473e3710a3d4a19162dbc72a34c853a9b44c226a7691aa1c9ddb56b4ccf43cf570ffda419d98a7104bb4d6210a0875e084d494b4987562840e2dd55fc091602fae24b41a580d2a1f99d4a081b42398e41cae5c55d376ec1181d1aecacba06e5f4392d6094cc4ce68662d5f012814f9982b732af505661c7
5108ec159719b0b2112cd70f2e39df3533fb14d5516e26513270a2085f28adb2446d4ac72373a0c6223c5ad7c15ac32ec27db4636c6e9e29297df3378f339c43ebe35720b383e1d23c05ff2053b8966eb19680775935092860a92cc095cc97b9f0d91d31d653a7f0c99ab153affeb7ca055ca3a2a3290b707bb202425b0db04886a948e4ea0fde77edecd5f93275bdcbb49a7cf6f2e08d42f474113f4b0978baf3357b5f81e40424e3e16015ffabf003c63351160d3e74f6257b71032fc6b778d237a3a184575d39d3fb126c85133c9c2f0ba983f976d3b8cfc7c10d09f69110c74d1ee8193f737642c2fe77397a0fb67d4d55238a52321ac97bbfce81bc63a6eb3f65d5d8cf294fa0e7dc53b4fe4d0dc48d123c98e5f7da50678191d5308d7c91066fa141e7dd168fbcb2cf21ceef5e0bd44ca3ad299a37a4be132670bbfd68e9b6b822f07c1eedcc021d82e079c5027b8397ffad7da9cf11afa21031f1b95e81a7e36ea009eb584b799dcecec72ffacf296af6f50c72cc3bbc3e435d1946e446a9aa7d2859f76743b7d81af65d8e39573f593bf29f24ca7bfbcf782588214e78638e3f32f5376fcfa4d8e59750fce1fc47bf8da83e6c67770b3d06645958694958326fc92337a088db0ebbbcd34b7cb4822eec0cf850e7bd88c4e69b5195b4e867898028b7b1d82cb348db3a541fa74e83cfb224ae7a0ebb14e995d0441bd99432a891f2ce21892ac71fc9a80c3d685228a18686601d219cfa2325db0"}, {0xc, 0x7, {0x0, 0x1}}, {0xc, 0x8, {0x2, 0x3}}}}]}, {0x4}, {0x1928, 0x1, [@m_tunnel_key={0xb4, 0x9, 0x0, 0x0, {{0xf}, {0x3c, 0x2, 0x0, 0x1, [@TCA_TUNNEL_KEY_ENC_DST_PORT={0x6, 0x9, 0x4e24}, @TCA_TUNNEL_KEY_ENC_IPV6_SRC={0x14, 0x5, @rand_addr=' \x01\x00'}, @TCA_TUNNEL_KEY_ENC_IPV6_DST={0x14, 0x6, @mcast1}, @TCA_TUNNEL_KEY_NO_CSUM={0x5, 0xa, 0x1}]}, {0x49, 0x6, "809f09e7aa5c2a4f88693ffae10381961fb1d16ebb68320e536ef575e92917a2cbe501ccfe46160c06b2c2330cbe6d2e4f8dad7743c2da818ec756eb23f0afdcb197985e15"}, {0xc, 0x7, {0x0, 0x1}}, {0xc, 0x8, {0x2}}}}, @m_simple={0xd4, 0x9, 0x0, 0x0, {{0xb}, {0x40, 0x2, 0x0, 0x1, [@TCA_DEF_PARMS={0x18, 0x2, {0x6, 0x588, 0x1, 0x0, 0x4}}, @TCA_DEF_DATA={0xc, 0x3, '\'*/^]\\]\x00'}, @TCA_DEF_PARMS={0x18, 0x2, {0x4, 0xe5d3, 0x1, 0x5, 0x2}}]}, {0x69, 0x6, "70d44c04a4b4b9fded90a2090a07446647db331958f89dd8fc6898152566c265005932d943215013880227d8a8400c78dc7f3b947cf2ca9fcdada9bbca3bbc6ca7548a4bdadc804dd77631bad04ab9b7921c01a1e0d830aa44f22f2c9dfd113950d0dc4ae0"}, {0xc, 0x7, {0x1}}, {0xc, 0x8, {0x3}}}}, @m_sample={0x188, 0x18, 0x0, 0x0, {{0xb}, {0x5c, 0x2, 0x0, 0x1, [@TCA_SAMPLE_TRUNC_SIZE={0x8, 0x4, 0x8}, @TCA_SAMPLE_PARMS={0x18, 0x2, {0x7, 0x3, 0x8, 0x1f, 0x7fff}}, @TCA_SAMPLE_TRUNC_SIZE={0x8, 0x4, 0x1f}, @TCA_SAMPLE_PARMS={0x18, 0x2, {0xa5, 0x0, 0x1, 0xd3, 0xffffffff}}, @TCA_SAMPLE_RATE={0x8, 0x3, 0x7ff}, @TCA_SAMPLE_TRUNC_SIZE={0x8, 0x4, 0xfffffffa}, @TCA_SAMPLE_PSAMPLE_GROUP={0x8, 0x5, 0x7}]}, {0x102, 0x6, "b4ffd523d196d18f99019fa38bca1aec6a654b81befa6ff1cb0bca72d6172b440a7763bd9f10e03f52bdba4bd4bf33e87a302342b4a92c8de405caba3444bde359c099e1b51d7933e4991d38fc24e1f44d176d4c23da8ac594dc460a3121239b054a6f3c77bba349f7bbf42b57f5bc7540ef80d24513c6d6d56939382a0af4691d417c48b14550914f1862029cc4e756265248755d9b2b1012e5aa1f4a658c1d321618bf94e0971765756c74c7366607c9e98a508bd9f401eb6b083e5c8dfcf6024c41228dbcf38a262c81a26bb46080f9e2524dde68cbb8ad83f6d00e43ebde23708d7edc2ae7d9feafbe8f23b2728fbf4eb8f9af49edd3dd25e874da62"}, {0xc}, {0xc, 0x8, {0x3, 0x1}}}}, @m_mirred={0x1b8, 0xa, 0x0, 0x0, {{0xb}, {0x124, 0x2, 0x0, 0x1, [@TCA_MIRRED_PARMS={0x20, 0x2, {{0x401, 0x9, 0x2, 0x1f, 0x8001}, 0x4}}, @TCA_MIRRED_PARMS={0x20, 0x2, {{0x20, 0xffffffff, 0x2, 0x5, 0x6}, 0x1}}, @TCA_MIRRED_PARMS={0x20, 0x2, {{0x81, 0x3ff, 0x1, 0x9fa, 0x4e}, 0x3, r7}}, @TCA_MIRRED_PARMS={0x20, 0x2, {{0x4, 0x2, 0x6, 0x401, 0xffff}, 0x2}}, @TCA_MIRRED_PARMS={0x20, 0x2, {{0x6, 0x1f, 0x1, 0x9, 0xffff8e06}, 0x2, r8}}, @TCA_MIRRED_PARMS={0x20, 0x2, {{0x87, 0x5, 
0x0, 0x1, 0xa2}, 0x3, r11}}, @TCA_MIRRED_PARMS={0x20, 0x2, {{0x9, 0x3f, 0x7, 0x80, 0x1}}}, @TCA_MIRRED_PARMS={0x20, 0x2, {{0xffffffc0, 0xffffffff, 0x10000000, 0x400, 0x1000}, 0x1}}, @TCA_MIRRED_PARMS={0x20, 0x2, {{0x2, 0xffffffff, 0x7, 0x7f, 0x80000000}, 0x3, r12}}]}, {0x6c, 0x6, "ab5da86fb2ad38ac3afa3ca2d0cd2122ee2cd97d06a53895d3c0b5b34115ddcfde0ecf58b865716c834153c8bc3c50b7543b6c0cdf8c7962c3b03b6f2240b13fabb7fb29c2076b65f86abfbea4a1559990231f5d8aa2515ffaf553794a3307e98635070792d8cb64"}, {0xc, 0x7, {0x0, 0x1}}, {0xc, 0x8, {0x3, 0x2}}}}, @m_ife={0x154, 0xa, 0x0, 0x0, {{0x8}, {0x30, 0x2, 0x0, 0x1, [@TCA_IFE_TYPE={0x6}, @TCA_IFE_TYPE={0x6}, @TCA_IFE_PARMS={0x1c, 0x1, {{0x9, 0x9, 0x20000000, 0x0, 0x400}}}]}, {0xfd, 0x6, "3b320608c3d17fbe4bb044d1d06c3fca18ad65be8a88364bbbac4ecb8418e972d8e09b9b7618d44221c4281710ae381efc8aa7d2b05e352f7d649ec32537db7b79d844225424ff3f610c493ce02e237be4335e34172da888d369f69c2fa93de8d1dd66665448475855cb69c14fd0bb565926a820d5fac76b706daff25e74e0b39b5e153783a160809ad5c42edcd88c8deb091b78e529320ed13f80fc1835d9bc0215c07394b4566382e1ce35382619011e02195e7c720286716226b955b39e61a5190cfe5a1aef22b0f2eabfdd6feaf832e716541aabffa63f97515888c96e94809565450759a673dc2e9be5142bd746d23acd37d4b11c1c09"}, {0xc, 0x7, {0x1, 0x1}}, {0xc, 0x8, {0x1, 0x2}}}}, @m_ife={0x9c, 0x1e, 0x0, 0x0, {{0x8}, {0x48, 0x2, 0x0, 0x1, [@TCA_IFE_METALST={0x34, 0x6, [@IFE_META_TCINDEX={0x4, 0x5, @void}, @IFE_META_TCINDEX={0x6, 0x5, @val=0x3}, @IFE_META_SKBMARK={0x8, 0x1, @val=0xa7d}, @IFE_META_PRIO={0x4, 0x3, @void}, @IFE_META_TCINDEX={0x4, 0x5, @void}, @IFE_META_SKBMARK={0x4, 0x1, @void}, @IFE_META_SKBMARK={0x4, 0x1, @void}, @IFE_META_TCINDEX={0x4, 0x5, @void}, @IFE_META_SKBMARK={0x8, 0x1, @val=0xffff7ff7}]}, @TCA_IFE_METALST={0x10, 0x6, [@IFE_META_SKBMARK={0x4, 0x1, @void}, @IFE_META_PRIO={0x8, 0x3, @val=0x8}]}]}, {0x2f, 0x6, "e8f0c370616d4897fcd654704142cb9e2829feb03369921f4d1e5d204aba6297eb3e0e7f3b943c1ed7653d"}, {0xc, 0x7, {0x0, 0x1}}, {0xc, 0x8, {0x0, 0x3}}}}, @m_ctinfo={0x114, 0x1, 0x0, 0x0, {{0xb}, {0x14, 0x2, 0x0, 0x1, [@TCA_CTINFO_PARMS_CPMARK_MASK={0x8, 0x7, 0xca92}, @TCA_CTINFO_PARMS_CPMARK_MASK={0x8, 0x7, 0x7dcad435}]}, {0xd7, 0x6, "890cb057c5ed5f061f6b62a0b6322ba15166e9d57d8cd2724ed9c98ec7e8eb9762068cc536dd8afef0859d5bc54fdce694a8c0bd50dc05ed803fedc3a94973e1e52b4477940480f624ac9d05387a1e4ef68e704519dcc3ab670eeb9f25a525b64a8f81ca33a460181b8ad1445e4ebeb0d5d479e63f207a14e8f59a8e805b59b12f4747dd5b2be72b7d759e613626f04d0b6c5d26d49ba7844809eb24a6597241468fd923bc185352307b186217c66594996b5f26c7060001b9bc36813e03f86bd8e344697114ab6550478df4014771afb1462e"}, {0xc, 0x7, {0x0, 0x1}}, {0xc, 0x8, {0x2, 0x1}}}}, @m_ct={0x64, 0x1f, 0x0, 0x0, {{0x7}, {0x20, 0x2, 0x0, 0x1, [@TCA_CT_NAT_PORT_MIN={0x6, 0xd, 0x4e22}, @TCA_CT_NAT_IPV6_MAX={0x14, 0xc, @remote}]}, {0x1d, 0x6, "4489a70062790508a1d241d3a84e5025263f4dcac759d844a1"}, {0xc}, {0xc, 0x8, {0x1, 0x1}}}}, @m_connmark={0x1050, 0x14, 0x0, 0x0, {{0xd}, {0x20, 0x2, 0x0, 0x1, [@TCA_CONNMARK_PARMS={0x1c, 0x1, {{0x253, 0x0, 0x8, 0x9}, 0x9}}]}, {0x1004, 0x6, 
"6ebb8648c560751d5724e86edbb0d1336a795e412c21adeda67d66863cc67f73163bec203823c9c257bac4f5882e6cfd2d288901552f4b393cf8927a43317385297629ae4c314f255c1cc57e23b89b4eb612fcbaf4ece5afcdeb3e76d233804a3ee6907ac7f689b7187066988a0054148905795099b441b0af8a157f84554a62fc1561befb837b4233b4b2901adc804dd8b77b92d9f8673c92846f33a86b96dc6f45217bf0aa9b3d38f44f351fcc281ffb13d7e272e528e8c267400efd851ba5427cf2bd9b8bfbdd25c7297b814b7089313329d1054f22e6c7e865845cf484dcfff64ca0e39f7d5b9301c95b0a67875860e0cb1afec2b891e3bfecde1d9ad1cccd1cc4e673abcf53dd4ee68756a949f5aae8a2c56840745d12f2b269ca3561007ab156b96c144d2a6345de9cad2d138adf7461c0bbdfa812cc42df6a4efb70b28c074c461aa8827bb4555aaa41a8c50df0899cf1b44c3209e1929e03811c237331fa7c6ecf9f5135f65ae03fbe44e332916db518cc5dc88629fccdac14c4bd1b3042c3adc57e7ff7005e2f1ec90bce8c22184344f0c2bf4e4c4f3f7fed5a1358b5568d90124b54d65fea1902d3ebfca9fccc4f020a4c9d22f20a9cf745f86ba59efde1c28bd6fe6c726a67a387028ebc78834f9b0cecb0221d7292f1d45a9011ceb485befd06a067dc054ce748af5719a6b5f99735026c0f0a12ee6be3d9fcb15ac5c5ded098ab56432ad4866260d6c1f955dac4e2614af921938055114c32bc602b11ecfacec87000ee05330dc00913d586917540a3fbb1ef87d527d50eff8310e09561cee5b47c5e85bb5485d0390aab3fd697fd51fbff1edd5d4543607e34c976d162522a6a8a2dca0179b3524f8eed903ca98f7c8df7b25ce11d063db7f96e7e8ded3c96201f40edf156db80c1ea43ba4418dd10b7bbf092d973503d90f8024bc68f7640e81c6a11cd633788a8ca6bb110827cf7fb21c19e60ec3654363b8f8d4616f89404b59258a226f89a0df4ab184d7b76cc87adf788e9ecf9d2658ad2d9424b1e872ee4d6cf03acce55a768c94a465eb7654232eb4b39f453de8bc65fc731ea41d140a40359480d370bf8f7c432a838c3f9806dbd8439d20c7d06183dec12754fde3f6fc14b0e02a182d564f089fc163c1cb4905866726128929d080a21277514bdff8eb2b9e3e08f5c666063dbfb94248677983c4b64d8ef579d062770264a42cca5d84d0464b3ee8c55281c6b3c3aa3c7877d7de034cbae1553ca3f4449a8bddca7f13c90f98e4a19c7976dd03f48cde4a9ed6220583f514b324ef190015ec1608c6b104a3d1a1b154a81819e3d7bdca949c9ae09144f7f5bfcdac5e71fe022cd2df17ca1de1e301986c8a845a61374c11a2db26005b6ebb6b53d376fc613f9f6c4880c27a7b3f8c85e87a3151e320eddd67a33154ff107e2ad5dbf57da59aa0b3ba21d054d0ce04f888892afc93c37dff332ae9f518e06a3b197f5a6f4330ec266f3d82428c1d59a3a7f9c8d871f6100005ce2533de35c3294a3187548fa1ddc839196a54dba117fac43be04b95d7781cd09a0a024c6b98c0ec02529b9cc2ccfddbac211d1c639c518975b013408ab8cc6c66bcd5a76ff2148b5532e5736086c37876b8214428bb8292851d1b9894cb88d967812b8ce9c499cd87803310d2e816007721d75dd108a56de8ec827feb0bd5fab496e8f807b9a4a13f853018fa546337794af83c8a2cfe9e96a0190af5cafc5df4e5dbee45ce812f5463533e2aa95e15b1aa3b6e037ada0f9f87b88a324b1b2b41ae59af7f088c060d36aa59473f6f9408c01798679afad180fcfd95651a52dfc407ef18461178a1816d9c6c2d03b95868b51c6492814985db042216a0096705d7cc11121933a250c59ded8a5a78fe4d62ce5000b38049b5eee5fd4ff5f5f0f6fdccf99a0999c460c0af0eefa2eb725d063e9ae5cd2b284d3f2ce617dc5ac3d9a3efbe3dd4340de56836e7684e7eafc87127ad0fcf9d585393f4f459352bdfc90eedee43cd428c0334a7946ce474b4740505bf2482b02aa00b473975594a51a9d52d7f66434ae917d4fb41269cb5595adafad227e0cd0c2677e348ea4562dc3b157c97b32ddf59a292eb4aa4d8c04a0027047eb09fe247f7fc5857e00c75382447b72e3082b6afc928af9d94903b11dc3fe5800b60d3a0a42ff10e5823ebdc0a84aa050f8e2768972ff2fb0fb247e97aa9e1f9d229de7e0335dbcec3a84eb6947e79007b2ffde51cfcc76561b61c41e7c85237509e9f1c2c8227711e6827d0a18ec7cd607eee5615ab5c8b87f0b9a022c742adc7af02224d8bbd42c25da21ead470b577b5ff83497979e702703f2ba015bf66bcb9787dc07ae30a8ed785e747deda4f5d2c2f8d799daf884a6270d64e92353b99f5bc806b7f3703d6bd4f1455e8e07422abd439c3cf296e3fc325715fdcae249d302a4d45ed7e83bdbef6911c017b0a24229f55861595d56b97108eb42f336c0b06ea061decfa
e73ef606199b1914291588539bfcf783f693ac82285bf562f050f49381498a5bb1f88907635c66318ed87c50574e029601a08ac972345458cd69f2c8cb60f4748fde0dabea5ee5085d03f90aafdd2768919f96d0654684eb9d703f5fa0d84a4b54197acc137939621f1b6592aa529532c70c84dc7cbe9619650250b69bfd702a8e1f0a25399743aa66ae3d917b3a3bb28197fd616fa8752537ffa0c997e1fabc9b520206af891cd7d0884ff49d8a1959bbb0356769bd74c4db5b9530caeb67fc8487a9c27f452b62cfdc3292028d766f26fe057575e5b9d18fd0f45d580cb69e8aeb0470620b9ed89202e17108a4c85cf408a4a9f2192e7b4b061df7248fc665da69be540d1ea30d18a8df2fab3a676640b685414d5aeb750671418bfd9bb06b1a4c27529cc5aecc595485a225fe983f6e1ac4df9c110b13530de54d12becc3725a5f7b78d238c1ed3ff46e862f58ea81edd6a114beccf2df466021d8a2fa2ace79166ea52aac2f7cfa59ca95c336243249e1d2be9d6789163d629a09cec52a618494a419c240837695ee5c7400efda0da8a4488bce4356247884c2572ae98f79fbc1be8cab68f062deaff7f1b66ad7012b6b3c7252c0e404cda7141b666ac906aa233b27181aaf09be2497b9124a030b5039343c4abba563e4acc54b7db484b9c4bd8266049566e4844cf78a7d1859da0fb2b301178cb05954b7a5f318387d3eacc8d8cf0d1f47ed97735aa71c13e4eaf3cda94a39f54dea60d65609b2a716b3cfe059a97363b5f97b2467a4fcdc41eaade7198b17973d26a897fd3e7127c1bfd2c1155fb9e8c163875ac3ad32854a2a2bcb6856cbc57f829dae94d054fef75e615e484ac6500f8437cb99d1a81b2984093138e45480cb347c3391df2e78f143baea580874f07076adf2680ee9f835346a212bc75f17dd5a05c26b02734838d7ee7352ee6a2ec93664093fb0299172996358d812f4ea22c776c499f5765b202898c83c6415e8f2e5cb26d1c934c99749dee7eaf70089a0ff3ec99e578024f2f50bf28ccdb075520057bdc478ff28dcae29aad47cfd66f8e6de90e6659a18d21f0a301dbb92d4f9c8de54018ea4adc43832033f7496596cfd96ea70096d9bec2d658ff59dacf0124bad9a067ed129a986942747a805007ca308c109e86056b4f0aaf4ed4a4505206d54cfb8777af6f7124a2d6046f291ad71c556dd1ca9a8d2e0c6011c3a30fc96fa1090994876db5302c6c5732de98da066a8407b738caedcde567e5b054f3146508cd655e1c8a27fd2fff8d429193635a144f2c393fbbd6a8f83d5a9fe0020f50d5e74489a6b5dda26be9f9811bb5d0e0f9685e1d2ba446febbfd20e02bf24571d21316a6d3baa8d7a1a7b76d9cb9f0f20485530908e5f2b1bee7301bd4793f046ed8b3daf376a229ba9448e5b1106a9da951a04ef628e92c87fe8377fe5edc9d786c6829cd0712a0d7a36a2707c76e9c76ff26f77e96db47e5b6d2a579f45531b223ea3924370974c5699cc3059425406e872e5743365d37e34269d4c4d6be500fff04b7700ca30b4a35df5f4d7055e7524fac2c0bff835a3db22ec7d563a7f34f5200de9308e93d6be6f5b463f8ee6f7d066aa5bff9d9f22f3106ee6819f772b8b0ff5684fde997ab5700972ac7c67fef87a04d7199a6242a405588e11ea99cd0c18a0061945e7964afc2c65c37fa392abf344ad9952bc3b74f6ea5e71bae135db305789fde3d8692cf8c652838a58d340c31088d62cee48546273ebca3c05bd12a0642a98c383953d1a4a60d514ee0609589db0f76032111f0c32e889da4d0dd3c4da3e70d528062147fb3c5a08b530ac20cc08e7f0a9b2c5b69ab8696f5ca4fe31bba43170b7835646543ae1304491e4b08633c73675a33ee3677cb5bb42bbb4acfbd49d0003fd1908938aa34652b391d665e59fe2d93af4c10998d9d0ce3cc92679677e842067a71aa782c6b338162f75297b751cabf956a40cf83651ca9481d6e57b75a914234a3ac873e70e427fec94e865cf04cf347fa2ca5e87cce9bcbeb0f18fde00acac70044c5b2fbf6c5f23d92549beec26ada757266d625a69b423453bcb78ec6572f0e824d3ec1221898656b3e428cab22ed7880e0583d9ead5a9a6a4d92b460fee41389b9d220fed4aafd6346fd7f343176ae7084a81da224c7a41f2f91ec186e0b0a71dee4587e9e383b5a78472e0c0e7e82f4aba50b125e672068df7b3757ab43e4c805657689f94fa2ed24a036c8c95f66803abcee187222d126fbd9de9d21ee7bd4e78835f85e7c5c99c5e633fd883d5408889687d2dd98a24634656b9e05361a731860fc3983e3f389e52d522f28bc4d17236c829de5a44c3077cec1c69029322e4564a9a58a307674b18e149175849d78715efdc0f3270280bf9200fc9729275a79153e6420ea77181a17803b487df5c71a2959ef4518f21c255d842b2e50cf2ad55ec49523a2acda13b59181f58936fdea673633dbc897bcef3b2c245779e
e10aaa81ebc8d34fbe7dbe638d98a3e3a9c6ad87bbccf9535fec5da5bf899e03975cdbba357cde3fbeef8c5bcafd0be79b059fa936cdb513eb3fa5785b49edd0ae81ce44a99466a9829f542491cdcf26b2385a925d15a2455e597b0d96f1c9a040c8e8c0bce7808abac3572dd00ae11b2aa5c852c06d59bcaff80d239b4609cbd07ce2921dbdd9fa462d4b46f07cb60a0c5f9af56c68967feaecdfd38ca658d2290c53671ccbb31acaf6deb351bf97f60aaf9a019d461d5ff34be828f5462060ed523a6c329fa108d60dabdc093abe33ac0e816212130b3520e8f035a86b5b9276ef4865cdb513d6b8ad4772744c199919fae3b7b6ede401004916d47d3b59936be3c5b4b5e32a4171d18e088206c52a593f82676ba88784313e5bcf103a08b83004e66ce41ae0d288a99b49cc6d30f539f4bf2623e990237b8a6878c14dfdf40c7ee1a1cab0440bd7cf68703a7a2165dea415982f7f3e1d4e6bbaaa24800f07b24f5a5a8fa7c58889be316e50be6ca0b88b5ab2f2f951d27db3e218b4b0bb883918fb503ca3e11f54347aedf4732e29069acd93b8042fd0784dd0c0b1a4aaf28a62b703850162b55234f03c5022acb0e5d4cb34fe3ee8d57a8894b88570a755348332f997751bd6ec51bf27f1792b70e976caa78a0c19c56c2c6465bc5cac5575220e450a504243d383be232824302d14adb7623427f9e6ee258953db86ef4c7d60357e0059e55b5c293a6febc585212a71c391b3e853425d6b018dc20"}, {0xc, 0x7, {0x0, 0x1}}, {0xc, 0x8, {0x1, 0x3}}}}, @m_csum={0xa4, 0x7, 0x0, 0x0, {{0x9}, {0x20, 0x2, 0x0, 0x1, [@TCA_CSUM_PARMS={0x1c, 0x1, {{0x2, 0x1ff, 0x0, 0xffffffff, 0xfa}, 0x4a}}]}, {0x5c, 0x6, "02fc12910c1fa1fbd15b0a8568dc8b2221df1c7cc4f09acc637d253ccaad4df77282eadb26fdc72728e80db5d2ae50504293746c11ecd32873e29c2ed89ce95882c7eb9a3a8d1a664c3769bd1fb0ce5f356e121bbdef9855"}, {0xc}, {0xc, 0x8, {0x2, 0x3}}}}]}]}, 0x6dac}, 0x1, 0x0, 0x0, 0x40850}, 0x0) [ 2179.773788][ T7617] 8021q: adding VLAN 0 to HW filter on device bond1439 10:41:55 executing program 2: openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) r0 = syz_init_net_socket$nfc_llcp(0x27, 0x2, 0x1) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000000000)=0xfffffffffffffbff) r1 = socket$can_bcm(0x1d, 0x2, 0x2) ioctl$EXT4_IOC_PRECACHE_EXTENTS(r1, 0x6612) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) (async) syz_init_net_socket$nfc_llcp(0x27, 0x2, 0x1) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000000000)=0xfffffffffffffbff) (async) socket$can_bcm(0x1d, 0x2, 0x2) (async) ioctl$EXT4_IOC_PRECACHE_EXTENTS(r1, 0x6612) (async) [ 2180.047118][ T7619] bond1439: (slave bridge1338): making interface the new active one [ 2180.140695][ T7619] bond1439: (slave bridge1338): Enslaving as an active interface with an up link 10:41:56 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xfeffffff}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:41:56 executing program 2: openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) (async) r0 = syz_init_net_socket$nfc_llcp(0x27, 0x2, 0x1) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 
0x40086607, &(0x7f0000000000)=0xfffffffffffffbff) (async) r1 = socket$can_bcm(0x1d, 0x2, 0x2) ioctl$EXT4_IOC_PRECACHE_EXTENTS(r1, 0x6612) 10:41:56 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) r1 = syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r2 = socket$netlink(0x10, 0x3, 0x0) pipe(&(0x7f0000000140)={0xffffffffffffffff, 0xffffffffffffffff}) close(r3) ioctl$F2FS_IOC_MOVE_RANGE(r0, 0xc020f509, &(0x7f00000001c0)={r4, 0x7d4, 0x72e6, 0x2}) sendmsg$nl_route_sched(r5, &(0x7f00000002c0)={&(0x7f0000000200)={0x10, 0x0, 0x0, 0x8000}, 0xc, &(0x7f0000000280)={&(0x7f0000000240)=@deltaction={0x14, 0x31, 0x20, 0x70bd2b, 0x25dfdbfb}, 0x14}, 0x1, 0x0, 0x0, 0x4000094}, 0x80) r6 = socket$nl_generic(0x10, 0x3, 0x10) r7 = socket$nl_generic(0x10, 0x3, 0x10) ioctl$sock_SIOCGIFINDEX_80211(r6, 0x8933, &(0x7f0000000480)={'wlan1\x00', 0x0}) r9 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000180), 0xffffffffffffffff) sendmsg$NL80211_CMD_REMAIN_ON_CHANNEL(r7, &(0x7f0000000080)={0x0, 0x0, &(0x7f0000000100)={&(0x7f0000000040)=ANY=[@ANYBLOB=',\x00\x00\x00', @ANYRES16=r9, @ANYBLOB="01000000000000000000370f000008000300", @ANYRES32=r8, @ANYBLOB="080026006c0900000800570080"], 0x2c}}, 0x0) sendmsg$NL80211_CMD_SET_INTERFACE(r3, &(0x7f0000000180)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x80}, 0xc, &(0x7f0000000100)={&(0x7f0000000040)={0xa8, r1, 0x1, 0x70bd27, 0x25dfdbfe, {{}, {@val={0x8, 0x3, r8}, @void}}, [@NL80211_ATTR_4ADDR={0x5, 0x53, 0x1}, @NL80211_ATTR_4ADDR={0x5, 0x53, 0x1}, @NL80211_ATTR_IFTYPE={0x8, 0x5, 0x7}, @NL80211_ATTR_4ADDR={0x5, 0x53, 0x1}, @NL80211_ATTR_IFTYPE={0x8, 0x5, 0x9}, @mon_options=[@NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR={0xa, 0xe8, @broadcast}, @NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR={0xa, 0xe8, @device_b}, @NL80211_ATTR_MNTR_FLAGS={0x1c, 0x17, 0x0, 0x1, [@NL80211_MNTR_FLAG_OTHER_BSS={0x4}, @NL80211_MNTR_FLAG_ACTIVE={0x4}, @NL80211_MNTR_FLAG_PLCPFAIL={0x4}, @NL80211_MNTR_FLAG_FCSFAIL={0x4}, @NL80211_MNTR_FLAG_ACTIVE={0x4}, @NL80211_MNTR_FLAG_COOK_FRAMES={0x4}]}, @NL80211_ATTR_MU_MIMO_GROUP_DATA={0x1c, 0xe7, "20d019bc04e1f4930aa31871725e8a4e14711c26d795a980"}], @NL80211_ATTR_IFTYPE={0x8, 0x5, 0x8}, @NL80211_ATTR_MESH_ID={0xa}]}, 0xa8}, 0x1, 0x0, 0x0, 0x8045}, 0x5) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r2, 0x0, 0x0) [ 2180.279636][ T7631] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
10:41:56 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xda030000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:41:56 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) bind$rxrpc(r0, &(0x7f0000000180)=@in6={0x21, 0x4, 0x2, 0x1c, {0xa, 0x4e22, 0xffff37b2, @initdev={0xfe, 0x88, '\x00', 0x1, 0x0}, 0x3}}, 0x24) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000000140)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x40000000}, 0xc, &(0x7f0000000100)={&(0x7f00000000c0)=ANY=[@ANYBLOB='\x00\x00\x00\x00', @ANYRES16=0x0, @ANYBLOB="000129bd7000fedbdf250a00000008000b00010001000800340008000000"], 0x24}, 0x1, 0x0, 0x0, 0x4004040}, 0x24000000) ioctl$FS_IOC_RESVSP(r0, 0x40305828, &(0x7f0000000000)={0x0, 0x1, 0x9, 0x4000000000007}) [ 2180.306682][ T7631] workqueue: Failed to create a rescuer kthread for wq "bond1403": -EINTR [ 2180.566784][ T7649] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2180.639924][ T7679] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2180.669207][ T7649] 8021q: adding VLAN 0 to HW filter on device bond846 [ 2180.819162][ T7664] bond846: (slave bridge1003): making interface the new active one [ 2180.835128][ T7664] bond846: (slave bridge1003): Enslaving as an active interface with an up link [ 2180.962256][ T7674] netlink: 'syz-executor.3': attribute type 1 has an invalid length. [ 2181.001632][ T7674] 8021q: adding VLAN 0 to HW filter on device bond1440 [ 2181.068953][ T7676] bridge1339: entered promiscuous mode [ 2181.082010][ T7676] bridge1339: entered allmulticast mode [ 2181.181476][ T7683] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 2181.249037][ T7683] 8021q: adding VLAN 0 to HW filter on device bond1403 10:41:57 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async, rerun: 32) write$binfmt_script(r0, &(0x7f0000000080)={'#! 
', './file0', [{0x20, 'memory.events\x00'}, {0x20, '$--!\''}], 0xa, "a08be2b67d38ea0ac3318f81a3b1bfebd323bfda9663f5bcd09076eddc1112d0c4e303a0625f51780bd71ba3d28631a64454437e00eec4478c24350e037d2dbcf9b448accc9d75a2bb822d83f03e081799a1bfad692f0de4ae73e277bd731e769259a5080575a603829466f18b412efc6f62ade696a6b70efaab5c9b2837c17bf5ab7a836f9e0b8de4ef825774045f5c84f23bbfd1ceb9467a52802b97b9e5119533830f882057f11ad9a8d7870a7a21ee26f41b82d22ae69a9c411a743b3e62cb7175f4f58e"}, 0xe6) (async, rerun: 32) r1 = bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f0000000240)={0x11, 0x3, &(0x7f0000000180)=ANY=[@ANYBLOB="1800000000000004000000000000000095"], &(0x7f00000000c0)='syzkaller\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x0, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) r2 = bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f0000000200)={&(0x7f00000004c0)='contention_begin\x00', r1}, 0x10) (async, rerun: 32) r3 = socket$inet6_icmp_raw(0xa, 0x3, 0x3a) (async, rerun: 32) r4 = bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f0000000300)={0x18, 0x5, &(0x7f0000000080)=@framed={{0x18, 0x0, 0x0, 0x0, 0x3000}, [@alu={0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0xffffffffffffffff}, @jmp={0x5, 0x0, 0x2, 0x0, 0x0, 0xfffffffffffffffe}]}, &(0x7f0000000000)='syzkaller\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x0, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) (async) r5 = socket$inet6_udp(0xa, 0x2, 0x0) ioctl$BTRFS_IOC_START_SYNC(r5, 0x80089418, &(0x7f00000001c0)=0x0) ioctl$BTRFS_IOC_SNAP_CREATE_V2(r4, 0x50009417, &(0x7f0000000800)={{}, r6, 0x0, @inherit={0x48, &(0x7f00000000c0)=ANY=[]}, @subvolid=0x1f}) (async) ioctl$BTRFS_IOC_SNAP_CREATE_V2(r2, 0x50009417, &(0x7f0000000500)={{r3}, r6, 0x8, @inherit={0x58, &(0x7f0000000000)={0x0, 0x2, 0xa110, 0x4, {0x0, 0x1f, 0x6, 0xaac, 0xfff}, [0x9, 0x26fc]}}, @devid}) (async, rerun: 64) r7 = openat$tun(0xffffffffffffff9c, &(0x7f0000000080), 0x2, 0x0) (async, rerun: 64) r8 = socket$nl_route(0x10, 0x3, 0x0) r9 = socket$inet6_sctp(0xa, 0x5, 0x84) sendmmsg$inet6(r9, &(0x7f0000005900)=[{{&(0x7f0000000180)={0xa, 0x0, 0x0, @private1}, 0x1c, &(0x7f0000001680)=[{&(0x7f00000001c0)="1a", 0x1}], 0x1}}, {{&(0x7f0000002c80)={0xa, 0x0, 0x0, @ipv4={'\x00', '\xff\xff', @private=0xa010101}}, 0x1c, &(0x7f0000004180)=[{&(0x7f0000002d00)="92", 0x1}], 0x1}}], 0x2, 0x4000040) ioctl$BTRFS_IOC_BALANCE_PROGRESS(0xffffffffffffffff, 0x84009422, &(0x7f0000001740)={0x0, 0x0, {0x0, @usage, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, @struct}, {0x0, @usage, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, @struct}, {0x0, @struct}}) ioctl$BTRFS_IOC_SCRUB_PROGRESS(r9, 0xc400941d, &(0x7f00000007c0)={r10, 0x6, 0x6}) (async) ioctl$BTRFS_IOC_SCRUB(r8, 0xc400941b, &(0x7f0000000940)={r10, 0x3f, 0x1, 0x1}) ioctl$BTRFS_IOC_DEV_INFO(r7, 0xd000941e, &(0x7f00000004c0)={r10, "57149989cf1136de6b93f2f3e5ead599"}) (async, rerun: 64) ioctl$BTRFS_IOC_SNAP_DESTROY_V2(r0, 0x5000943f, &(0x7f0000000200)={{r0}, r6, 0x0, @inherit={0x68, &(0x7f0000000180)={0x0, 0x4, 0x0, 0xfffffffffffffffb, {0x10, 0x0, 0x100, 0x8, 0x3ff}, [0x40, 0x0, 0x80000001, 0x9]}}, @devid=r10}) (rerun: 64) ioctl$BTRFS_IOC_SCRUB(r1, 0xc400941b, &(0x7f0000001d80)={r11, 0x993b, 0xfffffffffffffffb}) ioctl$BTRFS_IOC_SCRUB_PROGRESS(r5, 0xc400941d, &(0x7f0000002180)={r12, 0xfffffffffffffff7}) 10:41:57 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) bind$rxrpc(r0, &(0x7f0000000180)=@in6={0x21, 0x4, 0x2, 0x1c, {0xa, 0x4e22, 0xffff37b2, @initdev={0xfe, 0x88, 
'\x00', 0x1, 0x0}, 0x3}}, 0x24) (async) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000000140)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x40000000}, 0xc, &(0x7f0000000100)={&(0x7f00000000c0)=ANY=[@ANYBLOB='\x00\x00\x00\x00', @ANYRES16=0x0, @ANYBLOB="000129bd7000fedbdf250a00000008000b00010001000800340008000000"], 0x24}, 0x1, 0x0, 0x0, 0x4004040}, 0x24000000) ioctl$FS_IOC_RESVSP(r0, 0x40305828, &(0x7f0000000000)={0x0, 0x1, 0x9, 0x4000000000007}) 10:41:57 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) r1 = syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r2 = socket$netlink(0x10, 0x3, 0x0) pipe(&(0x7f0000000140)={0xffffffffffffffff, 0xffffffffffffffff}) close(r3) ioctl$F2FS_IOC_MOVE_RANGE(r0, 0xc020f509, &(0x7f00000001c0)={r4, 0x7d4, 0x72e6, 0x2}) sendmsg$nl_route_sched(r5, &(0x7f00000002c0)={&(0x7f0000000200)={0x10, 0x0, 0x0, 0x8000}, 0xc, &(0x7f0000000280)={&(0x7f0000000240)=@deltaction={0x14, 0x31, 0x20, 0x70bd2b, 0x25dfdbfb}, 0x14}, 0x1, 0x0, 0x0, 0x4000094}, 0x80) r6 = socket$nl_generic(0x10, 0x3, 0x10) r7 = socket$nl_generic(0x10, 0x3, 0x10) ioctl$sock_SIOCGIFINDEX_80211(r6, 0x8933, &(0x7f0000000480)={'wlan1\x00', 0x0}) r9 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000180), 0xffffffffffffffff) sendmsg$NL80211_CMD_REMAIN_ON_CHANNEL(r7, &(0x7f0000000080)={0x0, 0x0, &(0x7f0000000100)={&(0x7f0000000040)=ANY=[@ANYBLOB=',\x00\x00\x00', @ANYRES16=r9, @ANYBLOB="01000000000000000000370f000008000300", @ANYRES32=r8, @ANYBLOB="080026006c0900000800570080"], 0x2c}}, 0x0) sendmsg$NL80211_CMD_SET_INTERFACE(r3, &(0x7f0000000180)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x80}, 0xc, &(0x7f0000000100)={&(0x7f0000000040)={0xa8, r1, 0x1, 0x70bd27, 0x25dfdbfe, {{}, {@val={0x8, 0x3, r8}, @void}}, [@NL80211_ATTR_4ADDR={0x5, 0x53, 0x1}, @NL80211_ATTR_4ADDR={0x5, 0x53, 0x1}, @NL80211_ATTR_IFTYPE={0x8, 0x5, 0x7}, @NL80211_ATTR_4ADDR={0x5, 0x53, 0x1}, @NL80211_ATTR_IFTYPE={0x8, 0x5, 0x9}, @mon_options=[@NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR={0xa, 0xe8, @broadcast}, @NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR={0xa, 0xe8, @device_b}, @NL80211_ATTR_MNTR_FLAGS={0x1c, 0x17, 0x0, 0x1, [@NL80211_MNTR_FLAG_OTHER_BSS={0x4}, @NL80211_MNTR_FLAG_ACTIVE={0x4}, @NL80211_MNTR_FLAG_PLCPFAIL={0x4}, @NL80211_MNTR_FLAG_FCSFAIL={0x4}, @NL80211_MNTR_FLAG_ACTIVE={0x4}, @NL80211_MNTR_FLAG_COOK_FRAMES={0x4}]}, @NL80211_ATTR_MU_MIMO_GROUP_DATA={0x1c, 0xe7, "20d019bc04e1f4930aa31871725e8a4e14711c26d795a980"}], @NL80211_ATTR_IFTYPE={0x8, 0x5, 0x8}, @NL80211_ATTR_MESH_ID={0xa}]}, 0xa8}, 0x1, 0x0, 0x0, 0x8045}, 0x5) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r2, 0x0, 0x0) mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) (async) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) (async) accept(0xffffffffffffffff, 0x0, 0x0) (async) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) (async) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) (async) socket$netlink(0x10, 0x3, 0x0) (async) pipe(&(0x7f0000000140)) (async) close(r3) (async) ioctl$F2FS_IOC_MOVE_RANGE(r0, 0xc020f509, &(0x7f00000001c0)={r4, 0x7d4, 0x72e6, 0x2}) (async) sendmsg$nl_route_sched(r5, &(0x7f00000002c0)={&(0x7f0000000200)={0x10, 0x0, 0x0, 0x8000}, 0xc, &(0x7f0000000280)={&(0x7f0000000240)=@deltaction={0x14, 0x31, 
0x20, 0x70bd2b, 0x25dfdbfb}, 0x14}, 0x1, 0x0, 0x0, 0x4000094}, 0x80) (async) socket$nl_generic(0x10, 0x3, 0x10) (async) socket$nl_generic(0x10, 0x3, 0x10) (async) ioctl$sock_SIOCGIFINDEX_80211(r6, 0x8933, &(0x7f0000000480)={'wlan1\x00'}) (async) syz_genetlink_get_family_id$nl80211(&(0x7f0000000180), 0xffffffffffffffff) (async) sendmsg$NL80211_CMD_REMAIN_ON_CHANNEL(r7, &(0x7f0000000080)={0x0, 0x0, &(0x7f0000000100)={&(0x7f0000000040)=ANY=[@ANYBLOB=',\x00\x00\x00', @ANYRES16=r9, @ANYBLOB="01000000000000000000370f000008000300", @ANYRES32=r8, @ANYBLOB="080026006c0900000800570080"], 0x2c}}, 0x0) (async) sendmsg$NL80211_CMD_SET_INTERFACE(r3, &(0x7f0000000180)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x80}, 0xc, &(0x7f0000000100)={&(0x7f0000000040)={0xa8, r1, 0x1, 0x70bd27, 0x25dfdbfe, {{}, {@val={0x8, 0x3, r8}, @void}}, [@NL80211_ATTR_4ADDR={0x5, 0x53, 0x1}, @NL80211_ATTR_4ADDR={0x5, 0x53, 0x1}, @NL80211_ATTR_IFTYPE={0x8, 0x5, 0x7}, @NL80211_ATTR_4ADDR={0x5, 0x53, 0x1}, @NL80211_ATTR_IFTYPE={0x8, 0x5, 0x9}, @mon_options=[@NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR={0xa, 0xe8, @broadcast}, @NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR={0xa, 0xe8, @device_b}, @NL80211_ATTR_MNTR_FLAGS={0x1c, 0x17, 0x0, 0x1, [@NL80211_MNTR_FLAG_OTHER_BSS={0x4}, @NL80211_MNTR_FLAG_ACTIVE={0x4}, @NL80211_MNTR_FLAG_PLCPFAIL={0x4}, @NL80211_MNTR_FLAG_FCSFAIL={0x4}, @NL80211_MNTR_FLAG_ACTIVE={0x4}, @NL80211_MNTR_FLAG_COOK_FRAMES={0x4}]}, @NL80211_ATTR_MU_MIMO_GROUP_DATA={0x1c, 0xe7, "20d019bc04e1f4930aa31871725e8a4e14711c26d795a980"}], @NL80211_ATTR_IFTYPE={0x8, 0x5, 0x8}, @NL80211_ATTR_MESH_ID={0xa}]}, 0xa8}, 0x1, 0x0, 0x0, 0x8045}, 0x5) (async) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) (async) sendmsg$nl_route(r2, 0x0, 0x0) (async) 10:41:57 executing program 5: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) writev(r0, &(0x7f0000000340)=[{&(0x7f0000000280)="043caa07f3a3f76d858ace87d769ded7550278376e8c4f6550d9108cbf5189deea33e10b8a7224c6107d53d01f904f58c8e83e60c4192c5a7fbd1a5f9c9fb63ca8dddee8d195355273926702d073752d80d8545e667b5c485a", 0x59}, {&(0x7f0000000100)="e48c29435b087869336f3e74363df6ea882508c8aa487ff564e800ccd724f50e3896c5d551b1a2ec6bb29187299fe2e06c11dfcb2075f9a839b34a4a6b4c2bec", 0x40}, {&(0x7f0000000400)="a318ece45becd6e47ff8375b816640bce6a9ff4d8e83ab791404d351c3ba957f2f3a0220176c17e899248cf03c8a908c4ade8de15d81e79b803df97f6fe5e817c2efde3befe034a7bc6d52a0c1caf88dc7d2077e40879876ce4aa8d6273615939a97526b8cb409d0c58f8075db16b3aed80bd44efb660be17d9db07a801a43b59a034e9998a5d60b072ab0fe5dd97a86e25568ab6d9d74c7af31171f38eaff0cb0fc24300b48212ec6608ef820fc964a28db4cd8d0526d550efac04bc60f4e4cd3c224cbe0ba45f28e0566930afbca59424e39854793c6e301fdc9cedeb00500db4af5f6c2004568aa4682fc656b1943bb63", 0xf2}, {&(0x7f0000000500)="a1a8c7128cf53b860b5aaa1ccee4ffe846420e0192cfed6fed4eeddf537179b0a83627c3868f3b78b60a61263a90957f69337b2ab0a123218ed0318185135c095b602df299adfc458eef4947ab34782ecdbae8436486efca9d9076f2316af18200a3014efba92706cc1748f7c5925bdaa3aa4516b6f1a6f22f4f1c23d2554620b90ad856298d3087e4519d78d14dd2d1dbb2401cfd330b19ac2fc26b2b4837023a585b846cdc0e007773719a6ad0cf3acb3cde81220e1909741522d75d5c49222f1d0ef6d8bd1b58070df04e658553cb734adbdb98c8482330ed25a1d53b5b3a6c09aa436715f3535c84a943", 0xec}], 0x4) accept(0xffffffffffffffff, 0x0, &(0x7f0000000080)) socket$nl_route(0x10, 0x3, 0x0) r1 = 
socket(0x0, 0x803, 0x0) syz_genetlink_get_family_id$mptcp(&(0x7f0000000000), r1) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r3, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r5, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0xd2010000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x3c}}, 0x0) [ 2181.337021][ T7684] bond1403: (slave bridge1268): making interface the new active one [ 2181.349118][ T7684] bond1403: (slave bridge1268): Enslaving as an active interface with an up link 10:41:57 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xff0f0000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:41:57 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xdb0b0000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2181.437116][ T7693] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further 10:41:57 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) bind$rxrpc(r0, &(0x7f0000000180)=@in6={0x21, 0x4, 0x2, 0x1c, {0xa, 0x4e22, 0xffff37b2, @initdev={0xfe, 0x88, '\x00', 0x1, 0x0}, 0x3}}, 0x24) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000000140)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x40000000}, 0xc, 
&(0x7f0000000100)={&(0x7f00000000c0)=ANY=[@ANYBLOB='\x00\x00\x00\x00', @ANYRES16=0x0, @ANYBLOB="000129bd7000fedbdf250a00000008000b00010001000800340008000000"], 0x24}, 0x1, 0x0, 0x0, 0x4004040}, 0x24000000) ioctl$FS_IOC_RESVSP(r0, 0x40305828, &(0x7f0000000000)={0x0, 0x1, 0x9, 0x4000000000007}) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) bind$rxrpc(r0, &(0x7f0000000180)=@in6={0x21, 0x4, 0x2, 0x1c, {0xa, 0x4e22, 0xffff37b2, @initdev={0xfe, 0x88, '\x00', 0x1, 0x0}, 0x3}}, 0x24) (async) sendmsg$BATADV_CMD_GET_GATEWAYS(0xffffffffffffffff, &(0x7f0000000140)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x40000000}, 0xc, &(0x7f0000000100)={&(0x7f00000000c0)=ANY=[@ANYBLOB='\x00\x00\x00\x00', @ANYRES16=0x0, @ANYBLOB="000129bd7000fedbdf250a00000008000b00010001000800340008000000"], 0x24}, 0x1, 0x0, 0x0, 0x4004040}, 0x24000000) (async) ioctl$FS_IOC_RESVSP(r0, 0x40305828, &(0x7f0000000000)={0x0, 0x1, 0x9, 0x4000000000007}) (async) [ 2181.532873][ T7701] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2181.561021][ T7698] netlink: 'syz-executor.3': attribute type 1 has an invalid length. [ 2181.712965][ T7727] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2181.735467][ T7698] 8021q: adding VLAN 0 to HW filter on device bond1441 10:41:57 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) write$binfmt_script(r0, &(0x7f0000000080)={'#! ', './file0', [{0x20, 'memory.events\x00'}, {0x20, '$--!\''}], 0xa, "a08be2b67d38ea0ac3318f81a3b1bfebd323bfda9663f5bcd09076eddc1112d0c4e303a0625f51780bd71ba3d28631a64454437e00eec4478c24350e037d2dbcf9b448accc9d75a2bb822d83f03e081799a1bfad692f0de4ae73e277bd731e769259a5080575a603829466f18b412efc6f62ade696a6b70efaab5c9b2837c17bf5ab7a836f9e0b8de4ef825774045f5c84f23bbfd1ceb9467a52802b97b9e5119533830f882057f11ad9a8d7870a7a21ee26f41b82d22ae69a9c411a743b3e62cb7175f4f58e"}, 0xe6) (async) r1 = bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f0000000240)={0x11, 0x3, &(0x7f0000000180)=ANY=[@ANYBLOB="1800000000000004000000000000000095"], &(0x7f00000000c0)='syzkaller\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x0, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) r2 = bpf$BPF_RAW_TRACEPOINT_OPEN(0x11, &(0x7f0000000200)={&(0x7f00000004c0)='contention_begin\x00', r1}, 0x10) (async) r3 = socket$inet6_icmp_raw(0xa, 0x3, 0x3a) (async) r4 = bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f0000000300)={0x18, 0x5, &(0x7f0000000080)=@framed={{0x18, 0x0, 0x0, 0x0, 0x3000}, [@alu={0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0xffffffffffffffff}, @jmp={0x5, 0x0, 0x2, 0x0, 0x0, 0xfffffffffffffffe}]}, &(0x7f0000000000)='syzkaller\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x0, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) r5 = socket$inet6_udp(0xa, 0x2, 0x0) ioctl$BTRFS_IOC_START_SYNC(r5, 0x80089418, &(0x7f00000001c0)=0x0) ioctl$BTRFS_IOC_SNAP_CREATE_V2(r4, 0x50009417, &(0x7f0000000800)={{}, r6, 0x0, @inherit={0x48, &(0x7f00000000c0)=ANY=[]}, @subvolid=0x1f}) ioctl$BTRFS_IOC_SNAP_CREATE_V2(r2, 0x50009417, &(0x7f0000000500)={{r3}, r6, 0x8, @inherit={0x58, &(0x7f0000000000)={0x0, 0x2, 0xa110, 0x4, {0x0, 0x1f, 0x6, 0xaac, 0xfff}, [0x9, 0x26fc]}}, @devid}) r7 = openat$tun(0xffffffffffffff9c, &(0x7f0000000080), 0x2, 0x0) r8 = socket$nl_route(0x10, 
0x3, 0x0) (async) r9 = socket$inet6_sctp(0xa, 0x5, 0x84) sendmmsg$inet6(r9, &(0x7f0000005900)=[{{&(0x7f0000000180)={0xa, 0x0, 0x0, @private1}, 0x1c, &(0x7f0000001680)=[{&(0x7f00000001c0)="1a", 0x1}], 0x1}}, {{&(0x7f0000002c80)={0xa, 0x0, 0x0, @ipv4={'\x00', '\xff\xff', @private=0xa010101}}, 0x1c, &(0x7f0000004180)=[{&(0x7f0000002d00)="92", 0x1}], 0x1}}], 0x2, 0x4000040) (async) ioctl$BTRFS_IOC_BALANCE_PROGRESS(0xffffffffffffffff, 0x84009422, &(0x7f0000001740)={0x0, 0x0, {0x0, @usage, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, @struct}, {0x0, @usage, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, @struct}, {0x0, @struct}}) ioctl$BTRFS_IOC_SCRUB_PROGRESS(r9, 0xc400941d, &(0x7f00000007c0)={r10, 0x6, 0x6}) (async) ioctl$BTRFS_IOC_SCRUB(r8, 0xc400941b, &(0x7f0000000940)={r10, 0x3f, 0x1, 0x1}) ioctl$BTRFS_IOC_DEV_INFO(r7, 0xd000941e, &(0x7f00000004c0)={r10, "57149989cf1136de6b93f2f3e5ead599"}) (async) ioctl$BTRFS_IOC_SNAP_DESTROY_V2(r0, 0x5000943f, &(0x7f0000000200)={{r0}, r6, 0x0, @inherit={0x68, &(0x7f0000000180)={0x0, 0x4, 0x0, 0xfffffffffffffffb, {0x10, 0x0, 0x100, 0x8, 0x3ff}, [0x40, 0x0, 0x80000001, 0x9]}}, @devid=r10}) (async) ioctl$BTRFS_IOC_SCRUB(r1, 0xc400941b, &(0x7f0000001d80)={r11, 0x993b, 0xfffffffffffffffb}) ioctl$BTRFS_IOC_SCRUB_PROGRESS(r5, 0xc400941d, &(0x7f0000002180)={r12, 0xfffffffffffffff7}) [ 2181.818532][ T7729] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further 10:41:57 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000000000)=0x100) [ 2181.951333][ T7734] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further 10:41:58 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000000000)=0x100) 10:41:58 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) (async, rerun: 64) r0 = openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) (rerun: 64) accept(0xffffffffffffffff, 0x0, 0x0) (async) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) (async) r1 = syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r2 = socket$netlink(0x10, 0x3, 0x0) (async) pipe(&(0x7f0000000140)={0xffffffffffffffff, 0xffffffffffffffff}) close(r3) ioctl$F2FS_IOC_MOVE_RANGE(r0, 0xc020f509, &(0x7f00000001c0)={r4, 0x7d4, 0x72e6, 0x2}) sendmsg$nl_route_sched(r5, &(0x7f00000002c0)={&(0x7f0000000200)={0x10, 0x0, 0x0, 0x8000}, 0xc, &(0x7f0000000280)={&(0x7f0000000240)=@deltaction={0x14, 0x31, 0x20, 0x70bd2b, 0x25dfdbfb}, 0x14}, 0x1, 0x0, 0x0, 0x4000094}, 0x80) r6 = socket$nl_generic(0x10, 0x3, 0x10) (async) r7 = socket$nl_generic(0x10, 0x3, 0x10) ioctl$sock_SIOCGIFINDEX_80211(r6, 0x8933, &(0x7f0000000480)={'wlan1\x00', 0x0}) (async) r9 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000180), 0xffffffffffffffff) sendmsg$NL80211_CMD_REMAIN_ON_CHANNEL(r7, &(0x7f0000000080)={0x0, 0x0, &(0x7f0000000100)={&(0x7f0000000040)=ANY=[@ANYBLOB=',\x00\x00\x00', @ANYRES16=r9, @ANYBLOB="01000000000000000000370f000008000300", @ANYRES32=r8, @ANYBLOB="080026006c0900000800570080"], 0x2c}}, 0x0) sendmsg$NL80211_CMD_SET_INTERFACE(r3, &(0x7f0000000180)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x80}, 0xc, &(0x7f0000000100)={&(0x7f0000000040)={0xa8, r1, 0x1, 0x70bd27, 0x25dfdbfe, {{}, {@val={0x8, 0x3, r8}, @void}}, [@NL80211_ATTR_4ADDR={0x5, 0x53, 0x1}, @NL80211_ATTR_4ADDR={0x5, 
0x53, 0x1}, @NL80211_ATTR_IFTYPE={0x8, 0x5, 0x7}, @NL80211_ATTR_4ADDR={0x5, 0x53, 0x1}, @NL80211_ATTR_IFTYPE={0x8, 0x5, 0x9}, @mon_options=[@NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR={0xa, 0xe8, @broadcast}, @NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR={0xa, 0xe8, @device_b}, @NL80211_ATTR_MNTR_FLAGS={0x1c, 0x17, 0x0, 0x1, [@NL80211_MNTR_FLAG_OTHER_BSS={0x4}, @NL80211_MNTR_FLAG_ACTIVE={0x4}, @NL80211_MNTR_FLAG_PLCPFAIL={0x4}, @NL80211_MNTR_FLAG_FCSFAIL={0x4}, @NL80211_MNTR_FLAG_ACTIVE={0x4}, @NL80211_MNTR_FLAG_COOK_FRAMES={0x4}]}, @NL80211_ATTR_MU_MIMO_GROUP_DATA={0x1c, 0xe7, "20d019bc04e1f4930aa31871725e8a4e14711c26d795a980"}], @NL80211_ATTR_IFTYPE={0x8, 0x5, 0x8}, @NL80211_ATTR_MESH_ID={0xa}]}, 0xa8}, 0x1, 0x0, 0x0, 0x8045}, 0x5) (async) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) (async) sendmsg$nl_route(r2, 0x0, 0x0) [ 2182.024652][ T7702] bond1441: (slave bridge1339): making interface the new active one [ 2182.055138][ T7702] bond1441: (slave bridge1339): Enslaving as an active interface with an up link 10:41:58 executing program 4: openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) 10:41:58 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xff7f0000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2182.125588][ T7741] EXT4-fs warning (device sda1): ext4_group_extend:1861: can't shrink FS - resize aborted [ 2182.144054][ T7721] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 10:41:58 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000000000)=0x100) 10:41:58 executing program 4: openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) [ 2182.301085][ T7753] EXT4-fs warning (device sda1): ext4_group_extend:1861: can't shrink FS - resize aborted [ 2182.328151][ T7721] 8021q: adding VLAN 0 to HW filter on device bond1404 [ 2182.340417][ T7697] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
[ 2182.357093][ T7697] workqueue: Failed to create a rescuer kthread for wq "bond847": -EINTR [ 2182.439961][ T7726] bond1404: (slave bridge1269): making interface the new active one [ 2182.480010][ T7726] bond1404: (slave bridge1269): Enslaving as an active interface with an up link 10:41:58 executing program 5: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) writev(r0, &(0x7f0000000340)=[{&(0x7f0000000280)="043caa07f3a3f76d858ace87d769ded7550278376e8c4f6550d9108cbf5189deea33e10b8a7224c6107d53d01f904f58c8e83e60c4192c5a7fbd1a5f9c9fb63ca8dddee8d195355273926702d073752d80d8545e667b5c485a", 0x59}, {&(0x7f0000000100)="e48c29435b087869336f3e74363df6ea882508c8aa487ff564e800ccd724f50e3896c5d551b1a2ec6bb29187299fe2e06c11dfcb2075f9a839b34a4a6b4c2bec", 0x40}, {&(0x7f0000000400)="a318ece45becd6e47ff8375b816640bce6a9ff4d8e83ab791404d351c3ba957f2f3a0220176c17e899248cf03c8a908c4ade8de15d81e79b803df97f6fe5e817c2efde3befe034a7bc6d52a0c1caf88dc7d2077e40879876ce4aa8d6273615939a97526b8cb409d0c58f8075db16b3aed80bd44efb660be17d9db07a801a43b59a034e9998a5d60b072ab0fe5dd97a86e25568ab6d9d74c7af31171f38eaff0cb0fc24300b48212ec6608ef820fc964a28db4cd8d0526d550efac04bc60f4e4cd3c224cbe0ba45f28e0566930afbca59424e39854793c6e301fdc9cedeb00500db4af5f6c2004568aa4682fc656b1943bb63", 0xf2}, {&(0x7f0000000500)="a1a8c7128cf53b860b5aaa1ccee4ffe846420e0192cfed6fed4eeddf537179b0a83627c3868f3b78b60a61263a90957f69337b2ab0a123218ed0318185135c095b602df299adfc458eef4947ab34782ecdbae8436486efca9d9076f2316af18200a3014efba92706cc1748f7c5925bdaa3aa4516b6f1a6f22f4f1c23d2554620b90ad856298d3087e4519d78d14dd2d1dbb2401cfd330b19ac2fc26b2b4837023a585b846cdc0e007773719a6ad0cf3acb3cde81220e1909741522d75d5c49222f1d0ef6d8bd1b58070df04e658553cb734adbdb98c8482330ed25a1d53b5b3a6c09aa436715f3535c84a943", 0xec}], 0x4) accept(0xffffffffffffffff, 0x0, &(0x7f0000000080)) socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x0, 0x803, 0x0) syz_genetlink_get_family_id$mptcp(&(0x7f0000000000), r1) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r3, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r5, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0xe2020000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x3c}}, 0x0) 10:41:58 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) pipe(&(0x7f0000000140)={0xffffffffffffffff}) close(r0) ioctl$FS_IOC_SETFLAGS(r0, 0x40086602, &(0x7f0000000000)=0x8) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r1 = socket$netlink(0x10, 0x3, 0x0) 
sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r1, 0x0, 0x0) 10:41:58 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) r1 = socket$inet6_udplite(0xa, 0x2, 0x88) ioctl$BTRFS_IOC_LOGICAL_INO_V2(r1, 0xc038943b, &(0x7f0000000080)={0x1ff, 0x30, '\x00', 0x0, &(0x7f0000000000)=[0x0, 0x0, 0x0, 0x0, 0x0, 0x0]}) 10:41:58 executing program 4: openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) 10:41:58 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xdc0b0000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2182.630749][ T7751] netlink: 'syz-executor.3': attribute type 1 has an invalid length. 10:41:58 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000000)='blkio.bfq.io_merged\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) r1 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r1, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) openat$cgroup_ro(r1, &(0x7f0000000040)='blkio.bfq.io_wait_time\x00', 0x0, 0x0) [ 2182.708638][ T7768] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further 10:41:58 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) r1 = socket$inet6_udplite(0xa, 0x2, 0x88) ioctl$BTRFS_IOC_LOGICAL_INO_V2(r1, 0xc038943b, &(0x7f0000000080)={0x1ff, 0x30, '\x00', 0x0, &(0x7f0000000000)=[0x0, 0x0, 0x0, 0x0, 0x0, 0x0]}) [ 2182.827418][ T7751] 8021q: adding VLAN 0 to HW filter on device bond1442 10:41:58 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) r1 = socket$inet6_udplite(0xa, 0x2, 0x88) ioctl$BTRFS_IOC_LOGICAL_INO_V2(r1, 0xc038943b, &(0x7f0000000080)={0x1ff, 0x30, '\x00', 0x0, &(0x7f0000000000)=[0x0, 0x0, 0x0, 0x0, 0x0, 0x0]}) 10:41:58 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000000)='blkio.bfq.io_merged\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async, rerun: 32) r1 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) (rerun: 32) openat$cgroup_ro(r1, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) (async, rerun: 64) openat$cgroup_ro(r1, &(0x7f0000000040)='blkio.bfq.io_wait_time\x00', 0x0, 0x0) (rerun: 64) 10:41:59 
executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xffff0300}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:41:59 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) (async) pipe(&(0x7f0000000140)={0xffffffffffffffff}) close(r0) ioctl$FS_IOC_SETFLAGS(r0, 0x40086602, &(0x7f0000000000)=0x8) (async) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) (async) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) (async, rerun: 32) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) (async, rerun: 32) r1 = socket$netlink(0x10, 0x3, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r1, 0x0, 0x0) 10:41:59 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) bpf$BPF_BTF_LOAD(0x12, &(0x7f0000000100)={&(0x7f0000000080)={{0xeb9f, 0x1, 0x0, 0x18, 0x0, 0x4c, 0x4c, 0x9, [@union={0x10, 0x3, 0x0, 0x5, 0x0, 0x1, [{0x5, 0x2, 0xcc}, {0x2004, 0x4, 0x3}, {0x9, 0x1, 0xe9b}]}, @var={0xc, 0x0, 0x0, 0xe, 0x5, 0x1}, @func={0x7, 0x0, 0x0, 0xc, 0x5}]}, {0x0, [0x5f, 0x30, 0x30, 0x30, 0x4f, 0x30, 0x5f]}}, &(0x7f0000000000)=""/48, 0x6d, 0x30, 0x1}, 0x20) [ 2183.186605][ T7755] bond1442: (slave bridge1340): making interface the new active one [ 2183.199949][ T7755] bond1442: (slave bridge1340): Enslaving as an active interface with an up link [ 2183.332886][ T7767] 8021q: adding VLAN 0 to HW filter on device bond1405 [ 2183.488677][ T7773] bond1405: (slave bridge1270): making interface the new active one [ 2183.517728][ T7773] bond1405: (slave bridge1270): Enslaving as an active interface with an up link [ 2183.606487][ T7785] 8021q: adding VLAN 0 to HW filter on device bond847 10:41:59 executing program 5: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) writev(r0, &(0x7f0000000340)=[{&(0x7f0000000280)="043caa07f3a3f76d858ace87d769ded7550278376e8c4f6550d9108cbf5189deea33e10b8a7224c6107d53d01f904f58c8e83e60c4192c5a7fbd1a5f9c9fb63ca8dddee8d195355273926702d073752d80d8545e667b5c485a", 0x59}, {&(0x7f0000000100)="e48c29435b087869336f3e74363df6ea882508c8aa487ff564e800ccd724f50e3896c5d551b1a2ec6bb29187299fe2e06c11dfcb2075f9a839b34a4a6b4c2bec", 0x40}, 
{&(0x7f0000000400)="a318ece45becd6e47ff8375b816640bce6a9ff4d8e83ab791404d351c3ba957f2f3a0220176c17e899248cf03c8a908c4ade8de15d81e79b803df97f6fe5e817c2efde3befe034a7bc6d52a0c1caf88dc7d2077e40879876ce4aa8d6273615939a97526b8cb409d0c58f8075db16b3aed80bd44efb660be17d9db07a801a43b59a034e9998a5d60b072ab0fe5dd97a86e25568ab6d9d74c7af31171f38eaff0cb0fc24300b48212ec6608ef820fc964a28db4cd8d0526d550efac04bc60f4e4cd3c224cbe0ba45f28e0566930afbca59424e39854793c6e301fdc9cedeb00500db4af5f6c2004568aa4682fc656b1943bb63", 0xf2}, {&(0x7f0000000500)="a1a8c7128cf53b860b5aaa1ccee4ffe846420e0192cfed6fed4eeddf537179b0a83627c3868f3b78b60a61263a90957f69337b2ab0a123218ed0318185135c095b602df299adfc458eef4947ab34782ecdbae8436486efca9d9076f2316af18200a3014efba92706cc1748f7c5925bdaa3aa4516b6f1a6f22f4f1c23d2554620b90ad856298d3087e4519d78d14dd2d1dbb2401cfd330b19ac2fc26b2b4837023a585b846cdc0e007773719a6ad0cf3acb3cde81220e1909741522d75d5c49222f1d0ef6d8bd1b58070df04e658553cb734adbdb98c8482330ed25a1d53b5b3a6c09aa436715f3535c84a943", 0xec}], 0x4) accept(0xffffffffffffffff, 0x0, &(0x7f0000000080)) socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x0, 0x803, 0x0) syz_genetlink_get_family_id$mptcp(&(0x7f0000000000), r1) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r3, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r5, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0xe2030000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x3c}}, 0x0) 10:41:59 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000000)='blkio.bfq.io_merged\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) r1 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r1, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) (async) openat$cgroup_ro(r1, &(0x7f0000000040)='blkio.bfq.io_wait_time\x00', 0x0, 0x0) 10:41:59 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) bpf$BPF_BTF_LOAD(0x12, &(0x7f0000000100)={&(0x7f0000000080)={{0xeb9f, 0x1, 0x0, 0x18, 0x0, 0x4c, 0x4c, 0x9, [@union={0x10, 0x3, 0x0, 0x5, 0x0, 0x1, [{0x5, 0x2, 0xcc}, {0x2004, 0x4, 0x3}, {0x9, 0x1, 0xe9b}]}, @var={0xc, 0x0, 0x0, 0xe, 0x5, 0x1}, @func={0x7, 0x0, 0x0, 0xc, 0x5}]}, {0x0, [0x5f, 0x30, 0x30, 0x30, 0x4f, 0x30, 0x5f]}}, &(0x7f0000000000)=""/48, 0x6d, 0x30, 0x1}, 0x20) 10:41:59 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) (async) pipe(&(0x7f0000000140)={0xffffffffffffffff}) close(r0) ioctl$FS_IOC_SETFLAGS(r0, 0x40086602, &(0x7f0000000000)=0x8) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) (async) accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 
0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) (async) r1 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r1, 0x0, 0x0) 10:41:59 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xdf0b0000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2183.697823][ T7804] 8021q: adding VLAN 0 to HW filter on device bond1443 10:41:59 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) bpf$BPF_BTF_LOAD(0x12, &(0x7f0000000100)={&(0x7f0000000080)={{0xeb9f, 0x1, 0x0, 0x18, 0x0, 0x4c, 0x4c, 0x9, [@union={0x10, 0x3, 0x0, 0x5, 0x0, 0x1, [{0x5, 0x2, 0xcc}, {0x2004, 0x4, 0x3}, {0x9, 0x1, 0xe9b}]}, @var={0xc, 0x0, 0x0, 0xe, 0x5, 0x1}, @func={0x7, 0x0, 0x0, 0xc, 0x5}]}, {0x0, [0x5f, 0x30, 0x30, 0x30, 0x4f, 0x30, 0x5f]}}, &(0x7f0000000000)=""/48, 0x6d, 0x30, 0x1}, 0x20) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) bpf$BPF_BTF_LOAD(0x12, &(0x7f0000000100)={&(0x7f0000000080)={{0xeb9f, 0x1, 0x0, 0x18, 0x0, 0x4c, 0x4c, 0x9, [@union={0x10, 0x3, 0x0, 0x5, 0x0, 0x1, [{0x5, 0x2, 0xcc}, {0x2004, 0x4, 0x3}, {0x9, 0x1, 0xe9b}]}, @var={0xc, 0x0, 0x0, 0xe, 0x5, 0x1}, @func={0x7, 0x0, 0x0, 0xc, 0x5}]}, {0x0, [0x5f, 0x30, 0x30, 0x30, 0x4f, 0x30, 0x5f]}}, &(0x7f0000000000)=""/48, 0x6d, 0x30, 0x1}, 0x20) (async) 10:41:59 executing program 4: openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) pipe(&(0x7f0000000140)={0xffffffffffffffff}) close(r0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2183.856201][ T7805] bridge1341: entered promiscuous mode [ 2183.916504][ T7805] bridge1341: entered allmulticast mode 10:42:00 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000000)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 10:42:00 executing program 4: openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) pipe(&(0x7f0000000140)={0xffffffffffffffff}) close(r0) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2184.149667][ T7805] bond1443: (slave bridge1341): making interface the new active one 10:42:00 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, 
&(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xffffa888}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:42:00 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_SWAP_BOOT(0xffffffffffffffff, 0x6611) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) r0 = accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r1 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) pipe(&(0x7f0000000140)={0xffffffffffffffff}) close(r2) r3 = syz_genetlink_get_family_id$batadv(&(0x7f0000000040), r0) sendmsg$BATADV_CMD_GET_MESH(r2, &(0x7f0000000100)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x1000004}, 0xc, &(0x7f00000000c0)={&(0x7f0000000180)=ANY=[@ANYBLOB="2400000034c23fc7be0e363591e7b7103b4948e5a8a4bb697956b8ed4582b542e983467a3877ece377955ae76309153424d4ff759023b1b8173e06db4fcdb9ea6f24a15bc2258c20f8e4e34d9f379895c2d022d3974287cd55ddc4082f8c233210cfa97357fdf5b62dd61e1500"/124, @ANYRES16=r3, @ANYBLOB="01002dbd7000fcdbdf2501000000050038000100000008000b0006000000"], 0x24}, 0x1, 0x0, 0x0, 0x24000850}, 0x40) sendmsg$nl_route(r1, 0x0, 0x0) 10:42:00 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000000)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2184.190166][ T7805] bond1443: (slave bridge1341): Enslaving as an active interface with an up link [ 2184.339676][ T7817] 8021q: adding VLAN 0 to HW filter on device bond1406 [ 2184.368399][ T7821] validate_nla: 4 callbacks suppressed [ 2184.368416][ T7821] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
[ 2184.429950][ T7821] 8021q: adding VLAN 0 to HW filter on device bond848 [ 2184.502142][ T7823] bond1406: (slave bridge1271): making interface the new active one [ 2184.515809][ T7823] bond1406: (slave bridge1271): Enslaving as an active interface with an up link 10:42:00 executing program 5: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) writev(r0, &(0x7f0000000340)=[{&(0x7f0000000280)="043caa07f3a3f76d858ace87d769ded7550278376e8c4f6550d9108cbf5189deea33e10b8a7224c6107d53d01f904f58c8e83e60c4192c5a7fbd1a5f9c9fb63ca8dddee8d195355273926702d073752d80d8545e667b5c485a", 0x59}, {&(0x7f0000000100)="e48c29435b087869336f3e74363df6ea882508c8aa487ff564e800ccd724f50e3896c5d551b1a2ec6bb29187299fe2e06c11dfcb2075f9a839b34a4a6b4c2bec", 0x40}, {&(0x7f0000000400)="a318ece45becd6e47ff8375b816640bce6a9ff4d8e83ab791404d351c3ba957f2f3a0220176c17e899248cf03c8a908c4ade8de15d81e79b803df97f6fe5e817c2efde3befe034a7bc6d52a0c1caf88dc7d2077e40879876ce4aa8d6273615939a97526b8cb409d0c58f8075db16b3aed80bd44efb660be17d9db07a801a43b59a034e9998a5d60b072ab0fe5dd97a86e25568ab6d9d74c7af31171f38eaff0cb0fc24300b48212ec6608ef820fc964a28db4cd8d0526d550efac04bc60f4e4cd3c224cbe0ba45f28e0566930afbca59424e39854793c6e301fdc9cedeb00500db4af5f6c2004568aa4682fc656b1943bb63", 0xf2}, {&(0x7f0000000500)="a1a8c7128cf53b860b5aaa1ccee4ffe846420e0192cfed6fed4eeddf537179b0a83627c3868f3b78b60a61263a90957f69337b2ab0a123218ed0318185135c095b602df299adfc458eef4947ab34782ecdbae8436486efca9d9076f2316af18200a3014efba92706cc1748f7c5925bdaa3aa4516b6f1a6f22f4f1c23d2554620b90ad856298d3087e4519d78d14dd2d1dbb2401cfd330b19ac2fc26b2b4837023a585b846cdc0e007773719a6ad0cf3acb3cde81220e1909741522d75d5c49222f1d0ef6d8bd1b58070df04e658553cb734adbdb98c8482330ed25a1d53b5b3a6c09aa436715f3535c84a943", 0xec}], 0x4) accept(0xffffffffffffffff, 0x0, &(0x7f0000000080)) socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x0, 0x803, 0x0) syz_genetlink_get_family_id$mptcp(&(0x7f0000000000), r1) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r3, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r5, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0xe4ffffff}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x3c}}, 0x0) 10:42:00 executing program 4: openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) pipe(&(0x7f0000000140)={0xffffffffffffffff}) close(r0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 10:42:00 executing program 2: openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000000)='memory.events\x00', 0x275a, 0x0) (async) r0 = 
openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000000)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 10:42:00 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) ioctl$EXT4_IOC_SWAP_BOOT(0xffffffffffffffff, 0x6611) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) (async) r0 = accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) (async) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r1 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) pipe(&(0x7f0000000140)={0xffffffffffffffff}) close(r2) syz_genetlink_get_family_id$batadv(&(0x7f0000000040), r0) (async) r3 = syz_genetlink_get_family_id$batadv(&(0x7f0000000040), r0) sendmsg$BATADV_CMD_GET_MESH(r2, &(0x7f0000000100)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x1000004}, 0xc, &(0x7f00000000c0)={&(0x7f0000000180)=ANY=[@ANYBLOB="2400000034c23fc7be0e363591e7b7103b4948e5a8a4bb697956b8ed4582b542e983467a3877ece377955ae76309153424d4ff759023b1b8173e06db4fcdb9ea6f24a15bc2258c20f8e4e34d9f379895c2d022d3974287cd55ddc4082f8c233210cfa97357fdf5b62dd61e1500"/124, @ANYRES16=r3, @ANYBLOB="01002dbd7000fcdbdf2501000000050038000100000008000b0006000000"], 0x24}, 0x1, 0x0, 0x0, 0x24000850}, 0x40) (async) sendmsg$BATADV_CMD_GET_MESH(r2, &(0x7f0000000100)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x1000004}, 0xc, &(0x7f00000000c0)={&(0x7f0000000180)=ANY=[@ANYBLOB="2400000034c23fc7be0e363591e7b7103b4948e5a8a4bb697956b8ed4582b542e983467a3877ece377955ae76309153424d4ff759023b1b8173e06db4fcdb9ea6f24a15bc2258c20f8e4e34d9f379895c2d022d3974287cd55ddc4082f8c233210cfa97357fdf5b62dd61e1500"/124, @ANYRES16=r3, @ANYBLOB="01002dbd7000fcdbdf2501000000050038000100000008000b0006000000"], 0x24}, 0x1, 0x0, 0x0, 0x24000850}, 0x40) sendmsg$nl_route(r1, 0x0, 0x0) 10:42:00 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xe2030000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2184.581195][ T7828] bond848: (slave bridge1004): making interface the new active one [ 2184.600467][ T7828] bond848: (slave bridge1004): Enslaving as an active interface with an up link 10:42:00 executing program 2: sendmsg$TIPC_CMD_SET_LINK_PRI(0xffffffffffffffff, &(0x7f0000000100)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x200000}, 0xc, &(0x7f00000000c0)={&(0x7f0000000080)={0x30, 0x0, 0x200, 0x70bd2b, 0x25dfdbfe, {{}, {}, {0x14, 0x18, {0x6, @bearer=@udp='udp:syz2\x00'}}}, ["", ""]}, 0x30}, 0x1, 0x0, 0x0, 0x80}, 0x40) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) 
ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r1, 0x0, 0x8000000000004) sendmsg$NFNL_MSG_CTHELPER_DEL(r1, &(0x7f0000000240)={&(0x7f0000000140)={0x10, 0x0, 0x0, 0x10}, 0xc, &(0x7f0000000200)={&(0x7f0000000180)={0x64, 0x2, 0x9, 0x301, 0x0, 0x0, {0x2, 0x0, 0x1}, [@NFCTH_STATUS={0x8, 0x6, 0x1, 0x0, 0x1}, @NFCTH_PRIV_DATA_LEN={0x8, 0x5, 0x1, 0x0, 0x1e}, @NFCTH_TUPLE={0x18, 0x2, [@CTA_TUPLE_IP={0x14, 0x1, 0x0, 0x1, @ipv4={{0x8, 0x1, @local}, {0x8, 0x2, @broadcast}}}]}, @NFCTH_POLICY={0xc, 0x4, 0x0, 0x1, {0x8, 0x1, 0x1, 0x0, 0xffffffff}}, @NFCTH_POLICY={0xc, 0x4, 0x0, 0x1, {0x8, 0x1, 0x1, 0x0, 0x10001}}, @NFCTH_QUEUE_NUM={0x8, 0x3, 0x1, 0x0, 0xff}, @NFCTH_PRIV_DATA_LEN={0x8, 0x5, 0x1, 0x0, 0x1}]}, 0x64}, 0x1, 0x0, 0x0, 0x4004080}, 0x800) 10:42:00 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) pipe(&(0x7f0000000140)={0xffffffffffffffff, 0xffffffffffffffff}) close(r1) sendmsg$nl_route(r1, &(0x7f0000000100)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x2}, 0xc, &(0x7f00000000c0)={&(0x7f0000000180)=ANY=[@ANYBLOB="200000006a00060328fd7000ffdbdf253fcb000000000000000028000a0000000000"], 0x20}, 0x1, 0x0, 0x0, 0x4080}, 0x84) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) write$binfmt_elf64(r2, &(0x7f00000001c0)={{0x7f, 0x45, 0x4c, 0x46, 0x7f, 0x4, 0x7, 0x9, 0x1e2, 0x7, 0x3, 0xe9, 0x288, 0x40, 0x304, 0x3, 0x800, 0x38, 0x1, 0x0, 0xb219, 0x1ff}, [{0x4, 0x200, 0x8000, 0x1, 0x80000000, 0x2, 0x427, 0x3ff}, {0x2, 0x40, 0x7, 0x8, 0x96b, 0x8000000000000000, 0x6, 0x9}], "b56ab07e7f316ae9fbc9528218a1b3c9c17104349b7575d43e692ac16e209ef3792e0533d4d34157901e2ea15817ab18c7a24ee72f486ece10682ec88489741d62f1709475d3ae437eed3060806d12fbb807996c36b55f580bb51aad00ecb55c14a7f7603237900555419ba878fb5e6fc9e4b453d0f668e7e3f2133227", ['\x00', '\x00', '\x00']}, 0x42d) [ 2184.790065][ T7850] netlink: 'syz-executor.3': attribute type 1 has an invalid length. [ 2184.862917][ T7850] 8021q: adding VLAN 0 to HW filter on device bond1444 [ 2184.927252][ T7854] bond1444: (slave bridge1342): making interface the new active one [ 2184.950006][ T7854] bond1444: (slave bridge1342): Enslaving as an active interface with an up link [ 2184.959591][ T7858] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
[ 2185.010017][ T7882] EXT4-fs warning: 40 callbacks suppressed [ 2185.010034][ T7882] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further 10:42:01 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xfffff000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2185.051801][ T7858] 8021q: adding VLAN 0 to HW filter on device bond1407 10:42:01 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) pipe(&(0x7f0000000140)={0xffffffffffffffff, 0xffffffffffffffff}) close(r1) (async, rerun: 32) sendmsg$nl_route(r1, &(0x7f0000000100)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x2}, 0xc, &(0x7f00000000c0)={&(0x7f0000000180)=ANY=[@ANYBLOB="200000006a00060328fd7000ffdbdf253fcb000000000000000028000a0000000000"], 0x20}, 0x1, 0x0, 0x0, 0x4080}, 0x84) (rerun: 32) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) write$binfmt_elf64(r2, &(0x7f00000001c0)={{0x7f, 0x45, 0x4c, 0x46, 0x7f, 0x4, 0x7, 0x9, 0x1e2, 0x7, 0x3, 0xe9, 0x288, 0x40, 0x304, 0x3, 0x800, 0x38, 0x1, 0x0, 0xb219, 0x1ff}, [{0x4, 0x200, 0x8000, 0x1, 0x80000000, 0x2, 0x427, 0x3ff}, {0x2, 0x40, 0x7, 0x8, 0x96b, 0x8000000000000000, 0x6, 0x9}], "b56ab07e7f316ae9fbc9528218a1b3c9c17104349b7575d43e692ac16e209ef3792e0533d4d34157901e2ea15817ab18c7a24ee72f486ece10682ec88489741d62f1709475d3ae437eed3060806d12fbb807996c36b55f580bb51aad00ecb55c14a7f7603237900555419ba878fb5e6fc9e4b453d0f668e7e3f2133227", ['\x00', '\x00', '\x00']}, 0x42d) 10:42:01 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) (async) ioctl$EXT4_IOC_SWAP_BOOT(0xffffffffffffffff, 0x6611) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) (async) r0 = accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) (async, rerun: 32) r1 = socket$netlink(0x10, 0x3, 0x0) (async, rerun: 32) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) (async) pipe(&(0x7f0000000140)={0xffffffffffffffff}) close(r2) r3 = syz_genetlink_get_family_id$batadv(&(0x7f0000000040), r0) sendmsg$BATADV_CMD_GET_MESH(r2, &(0x7f0000000100)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x1000004}, 0xc, &(0x7f00000000c0)={&(0x7f0000000180)=ANY=[@ANYBLOB="2400000034c23fc7be0e363591e7b7103b4948e5a8a4bb697956b8ed4582b542e983467a3877ece377955ae76309153424d4ff759023b1b8173e06db4fcdb9ea6f24a15bc2258c20f8e4e34d9f379895c2d022d3974287cd55ddc4082f8c233210cfa97357fdf5b62dd61e1500"/124, @ANYRES16=r3, @ANYBLOB="01002dbd7000fcdbdf2501000000050038000100000008000b0006000000"], 0x24}, 0x1, 0x0, 0x0, 0x24000850}, 0x40) (async, rerun: 32) sendmsg$nl_route(r1, 0x0, 0x0) (rerun: 
32) 10:42:01 executing program 2: sendmsg$TIPC_CMD_SET_LINK_PRI(0xffffffffffffffff, &(0x7f0000000100)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x200000}, 0xc, &(0x7f00000000c0)={&(0x7f0000000080)={0x30, 0x0, 0x200, 0x70bd2b, 0x25dfdbfe, {{}, {}, {0x14, 0x18, {0x6, @bearer=@udp='udp:syz2\x00'}}}, ["", ""]}, 0x30}, 0x1, 0x0, 0x0, 0x80}, 0x40) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r1, 0x0, 0x8000000000004) sendmsg$NFNL_MSG_CTHELPER_DEL(r1, &(0x7f0000000240)={&(0x7f0000000140)={0x10, 0x0, 0x0, 0x10}, 0xc, &(0x7f0000000200)={&(0x7f0000000180)={0x64, 0x2, 0x9, 0x301, 0x0, 0x0, {0x2, 0x0, 0x1}, [@NFCTH_STATUS={0x8, 0x6, 0x1, 0x0, 0x1}, @NFCTH_PRIV_DATA_LEN={0x8, 0x5, 0x1, 0x0, 0x1e}, @NFCTH_TUPLE={0x18, 0x2, [@CTA_TUPLE_IP={0x14, 0x1, 0x0, 0x1, @ipv4={{0x8, 0x1, @local}, {0x8, 0x2, @broadcast}}}]}, @NFCTH_POLICY={0xc, 0x4, 0x0, 0x1, {0x8, 0x1, 0x1, 0x0, 0xffffffff}}, @NFCTH_POLICY={0xc, 0x4, 0x0, 0x1, {0x8, 0x1, 0x1, 0x0, 0x10001}}, @NFCTH_QUEUE_NUM={0x8, 0x3, 0x1, 0x0, 0xff}, @NFCTH_PRIV_DATA_LEN={0x8, 0x5, 0x1, 0x0, 0x1}]}, 0x64}, 0x1, 0x0, 0x0, 0x4004080}, 0x800) sendmsg$TIPC_CMD_SET_LINK_PRI(0xffffffffffffffff, &(0x7f0000000100)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x200000}, 0xc, &(0x7f00000000c0)={&(0x7f0000000080)={0x30, 0x0, 0x200, 0x70bd2b, 0x25dfdbfe, {{}, {}, {0x14, 0x18, {0x6, @bearer=@udp='udp:syz2\x00'}}}, ["", ""]}, 0x30}, 0x1, 0x0, 0x0, 0x80}, 0x40) (async) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) sendfile(0xffffffffffffffff, r1, 0x0, 0x8000000000004) (async) sendmsg$NFNL_MSG_CTHELPER_DEL(r1, &(0x7f0000000240)={&(0x7f0000000140)={0x10, 0x0, 0x0, 0x10}, 0xc, &(0x7f0000000200)={&(0x7f0000000180)={0x64, 0x2, 0x9, 0x301, 0x0, 0x0, {0x2, 0x0, 0x1}, [@NFCTH_STATUS={0x8, 0x6, 0x1, 0x0, 0x1}, @NFCTH_PRIV_DATA_LEN={0x8, 0x5, 0x1, 0x0, 0x1e}, @NFCTH_TUPLE={0x18, 0x2, [@CTA_TUPLE_IP={0x14, 0x1, 0x0, 0x1, @ipv4={{0x8, 0x1, @local}, {0x8, 0x2, @broadcast}}}]}, @NFCTH_POLICY={0xc, 0x4, 0x0, 0x1, {0x8, 0x1, 0x1, 0x0, 0xffffffff}}, @NFCTH_POLICY={0xc, 0x4, 0x0, 0x1, {0x8, 0x1, 0x1, 0x0, 0x10001}}, @NFCTH_QUEUE_NUM={0x8, 0x3, 0x1, 0x0, 0xff}, @NFCTH_PRIV_DATA_LEN={0x8, 0x5, 0x1, 0x0, 0x1}]}, 0x64}, 0x1, 0x0, 0x0, 0x4004080}, 0x800) (async) [ 2185.306074][ T7900] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further 10:42:01 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) pipe(&(0x7f0000000140)={0xffffffffffffffff, 0xffffffffffffffff}) close(r1) sendmsg$nl_route(r1, &(0x7f0000000100)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x2}, 0xc, &(0x7f00000000c0)={&(0x7f0000000180)=ANY=[@ANYBLOB="200000006a00060328fd7000ffdbdf253fcb000000000000000028000a0000000000"], 0x20}, 0x1, 0x0, 0x0, 0x4080}, 0x84) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) write$binfmt_elf64(r2, &(0x7f00000001c0)={{0x7f, 0x45, 0x4c, 0x46, 0x7f, 0x4, 0x7, 0x9, 0x1e2, 0x7, 0x3, 0xe9, 0x288, 0x40, 0x304, 0x3, 0x800, 0x38, 0x1, 0x0, 0xb219, 0x1ff}, [{0x4, 0x200, 0x8000, 0x1, 0x80000000, 0x2, 0x427, 
0x3ff}, {0x2, 0x40, 0x7, 0x8, 0x96b, 0x8000000000000000, 0x6, 0x9}], "b56ab07e7f316ae9fbc9528218a1b3c9c17104349b7575d43e692ac16e209ef3792e0533d4d34157901e2ea15817ab18c7a24ee72f486ece10682ec88489741d62f1709475d3ae437eed3060806d12fbb807996c36b55f580bb51aad00ecb55c14a7f7603237900555419ba878fb5e6fc9e4b453d0f668e7e3f2133227", ['\x00', '\x00', '\x00']}, 0x42d) [ 2185.348397][ T7868] bond1407: (slave bridge1272): making interface the new active one [ 2185.379250][ T7905] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2185.406379][ T7868] bond1407: (slave bridge1272): Enslaving as an active interface with an up link [ 2185.479465][ T7881] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2185.485822][ T7912] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2185.636353][ T7881] 8021q: adding VLAN 0 to HW filter on device bond849 [ 2185.717595][ T7884] bond849: (slave bridge1005): making interface the new active one [ 2185.730645][ T7884] bond849: (slave bridge1005): Enslaving as an active interface with an up link 10:42:01 executing program 5: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) writev(r0, &(0x7f0000000340)=[{&(0x7f0000000280)="043caa07f3a3f76d858ace87d769ded7550278376e8c4f6550d9108cbf5189deea33e10b8a7224c6107d53d01f904f58c8e83e60c4192c5a7fbd1a5f9c9fb63ca8dddee8d195355273926702d073752d80d8545e667b5c485a", 0x59}, {&(0x7f0000000100)="e48c29435b087869336f3e74363df6ea882508c8aa487ff564e800ccd724f50e3896c5d551b1a2ec6bb29187299fe2e06c11dfcb2075f9a839b34a4a6b4c2bec", 0x40}, {&(0x7f0000000400)="a318ece45becd6e47ff8375b816640bce6a9ff4d8e83ab791404d351c3ba957f2f3a0220176c17e899248cf03c8a908c4ade8de15d81e79b803df97f6fe5e817c2efde3befe034a7bc6d52a0c1caf88dc7d2077e40879876ce4aa8d6273615939a97526b8cb409d0c58f8075db16b3aed80bd44efb660be17d9db07a801a43b59a034e9998a5d60b072ab0fe5dd97a86e25568ab6d9d74c7af31171f38eaff0cb0fc24300b48212ec6608ef820fc964a28db4cd8d0526d550efac04bc60f4e4cd3c224cbe0ba45f28e0566930afbca59424e39854793c6e301fdc9cedeb00500db4af5f6c2004568aa4682fc656b1943bb63", 0xf2}, {&(0x7f0000000500)="a1a8c7128cf53b860b5aaa1ccee4ffe846420e0192cfed6fed4eeddf537179b0a83627c3868f3b78b60a61263a90957f69337b2ab0a123218ed0318185135c095b602df299adfc458eef4947ab34782ecdbae8436486efca9d9076f2316af18200a3014efba92706cc1748f7c5925bdaa3aa4516b6f1a6f22f4f1c23d2554620b90ad856298d3087e4519d78d14dd2d1dbb2401cfd330b19ac2fc26b2b4837023a585b846cdc0e007773719a6ad0cf3acb3cde81220e1909741522d75d5c49222f1d0ef6d8bd1b58070df04e658553cb734adbdb98c8482330ed25a1d53b5b3a6c09aa436715f3535c84a943", 0xec}], 0x4) accept(0xffffffffffffffff, 0x0, &(0x7f0000000080)) socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x0, 0x803, 0x0) syz_genetlink_get_family_id$mptcp(&(0x7f0000000000), r1) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r3, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r5, 
@ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0xea020000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x3c}}, 0x0) 10:42:01 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r1 = socket$netlink(0x10, 0x3, 0x0) connect$llc(r0, &(0x7f0000000000)={0x1a, 0x207, 0xff, 0x23, 0x8, 0x3f, @multicast}, 0x10) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r1, 0x0, 0x0) 10:42:01 executing program 4: openat$cgroup(0xffffffffffffffff, &(0x7f0000000000)='syz1\x00', 0x200002, 0x0) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r1 = openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r1, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) ioctl$FS_IOC_RESVSP(r1, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r1, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) write$tun(r1, 0x0, 0x0) r2 = openat$cgroup_ro(r1, &(0x7f0000000080)='cpuacct.usage_user\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r2, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 10:42:01 executing program 2: sendmsg$TIPC_CMD_SET_LINK_PRI(0xffffffffffffffff, &(0x7f0000000100)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x200000}, 0xc, &(0x7f00000000c0)={&(0x7f0000000080)={0x30, 0x0, 0x200, 0x70bd2b, 0x25dfdbfe, {{}, {}, {0x14, 0x18, {0x6, @bearer=@udp='udp:syz2\x00'}}}, ["", ""]}, 0x30}, 0x1, 0x0, 0x0, 0x80}, 0x40) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r1, 0x0, 0x8000000000004) sendmsg$NFNL_MSG_CTHELPER_DEL(r1, &(0x7f0000000240)={&(0x7f0000000140)={0x10, 0x0, 0x0, 0x10}, 0xc, &(0x7f0000000200)={&(0x7f0000000180)={0x64, 0x2, 0x9, 0x301, 0x0, 0x0, {0x2, 0x0, 0x1}, [@NFCTH_STATUS={0x8, 0x6, 0x1, 0x0, 0x1}, @NFCTH_PRIV_DATA_LEN={0x8, 0x5, 0x1, 0x0, 0x1e}, @NFCTH_TUPLE={0x18, 0x2, [@CTA_TUPLE_IP={0x14, 0x1, 0x0, 0x1, @ipv4={{0x8, 0x1, @local}, {0x8, 0x2, @broadcast}}}]}, @NFCTH_POLICY={0xc, 0x4, 0x0, 0x1, {0x8, 0x1, 0x1, 0x0, 0xffffffff}}, @NFCTH_POLICY={0xc, 0x4, 0x0, 0x1, {0x8, 0x1, 0x1, 0x0, 0x10001}}, @NFCTH_QUEUE_NUM={0x8, 0x3, 0x1, 0x0, 0xff}, @NFCTH_PRIV_DATA_LEN={0x8, 0x5, 0x1, 0x0, 0x1}]}, 0x64}, 0x1, 0x0, 0x0, 0x4004080}, 0x800) 
sendmsg$TIPC_CMD_SET_LINK_PRI(0xffffffffffffffff, &(0x7f0000000100)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x200000}, 0xc, &(0x7f00000000c0)={&(0x7f0000000080)={0x30, 0x0, 0x200, 0x70bd2b, 0x25dfdbfe, {{}, {}, {0x14, 0x18, {0x6, @bearer=@udp='udp:syz2\x00'}}}, ["", ""]}, 0x30}, 0x1, 0x0, 0x0, 0x80}, 0x40) (async) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) sendfile(0xffffffffffffffff, r1, 0x0, 0x8000000000004) (async) sendmsg$NFNL_MSG_CTHELPER_DEL(r1, &(0x7f0000000240)={&(0x7f0000000140)={0x10, 0x0, 0x0, 0x10}, 0xc, &(0x7f0000000200)={&(0x7f0000000180)={0x64, 0x2, 0x9, 0x301, 0x0, 0x0, {0x2, 0x0, 0x1}, [@NFCTH_STATUS={0x8, 0x6, 0x1, 0x0, 0x1}, @NFCTH_PRIV_DATA_LEN={0x8, 0x5, 0x1, 0x0, 0x1e}, @NFCTH_TUPLE={0x18, 0x2, [@CTA_TUPLE_IP={0x14, 0x1, 0x0, 0x1, @ipv4={{0x8, 0x1, @local}, {0x8, 0x2, @broadcast}}}]}, @NFCTH_POLICY={0xc, 0x4, 0x0, 0x1, {0x8, 0x1, 0x1, 0x0, 0xffffffff}}, @NFCTH_POLICY={0xc, 0x4, 0x0, 0x1, {0x8, 0x1, 0x1, 0x0, 0x10001}}, @NFCTH_QUEUE_NUM={0x8, 0x3, 0x1, 0x0, 0xff}, @NFCTH_PRIV_DATA_LEN={0x8, 0x5, 0x1, 0x0, 0x1}]}, 0x64}, 0x1, 0x0, 0x0, 0x4004080}, 0x800) (async) 10:42:01 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xe4ffffff}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2185.758845][ T7887] netlink: 'syz-executor.3': attribute type 1 has an invalid length. 
10:42:01 executing program 4: openat$cgroup(0xffffffffffffffff, &(0x7f0000000000)='syz1\x00', 0x200002, 0x0) (async) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r1 = openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r1, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) (async) ioctl$FS_IOC_RESVSP(r1, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r1, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) (async, rerun: 32) write$tun(r1, 0x0, 0x0) (async, rerun: 32) r2 = openat$cgroup_ro(r1, &(0x7f0000000080)='cpuacct.usage_user\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r2, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2185.831160][ T7920] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further 10:42:02 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) (async) r0 = openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) (async) accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) (async) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) (async) r1 = socket$netlink(0x10, 0x3, 0x0) connect$llc(r0, &(0x7f0000000000)={0x1a, 0x207, 0xff, 0x23, 0x8, 0x3f, @multicast}, 0x10) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) (async) sendmsg$nl_route(r1, 0x0, 0x0) [ 2186.099156][ T7887] 8021q: adding VLAN 0 to HW filter on device bond1445 [ 2186.332613][ T7899] bond1445: (slave bridge1343): making interface the new active one [ 2186.358516][ T7899] bond1445: (slave bridge1343): Enslaving as an active interface with an up link 10:42:02 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xffffff7f}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:42:02 executing program 2: r0 = bpf$ITER_CREATE(0x21, &(0x7f0000000200), 0x8) r1 = openat$cgroup_ro(r0, &(0x7f0000000040)='pids.current\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r1, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) bpf$BPF_BTF_LOAD(0x12, &(0x7f0000000180)={&(0x7f0000000080)={{0xeb9f, 0x1, 0x0, 0x18, 0x0, 0xc, 0xc, 0x3, [@func={0x7, 0x0, 
0x0, 0xc, 0x5}]}, {0x0, [0x2e]}}, &(0x7f00000000c0)=""/178, 0x27, 0xb2, 0x1}, 0x20) 10:42:02 executing program 4: openat$cgroup(0xffffffffffffffff, &(0x7f0000000000)='syz1\x00', 0x200002, 0x0) (async) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r1 = openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r1, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) (async) ioctl$FS_IOC_RESVSP(r1, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r1, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) (async) write$tun(r1, 0x0, 0x0) (async) r2 = openat$cgroup_ro(r1, &(0x7f0000000080)='cpuacct.usage_user\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r2, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 10:42:02 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) (async, rerun: 64) r0 = openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) (async, rerun: 64) accept(0xffffffffffffffff, 0x0, 0x0) (async) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) (async) r1 = socket$netlink(0x10, 0x3, 0x0) connect$llc(r0, &(0x7f0000000000)={0x1a, 0x207, 0xff, 0x23, 0x8, 0x3f, @multicast}, 0x10) (async) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) (async, rerun: 32) sendmsg$nl_route(r1, 0x0, 0x0) (rerun: 32) [ 2186.378679][ T7918] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 10:42:02 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r1 = openat$cgroup_int(0xffffffffffffffff, &(0x7f0000000000)='cpuset.memory_migrate\x00', 0x2, 0x0) ioctl$BTRFS_IOC_SUBVOL_SETFLAGS(r1, 0x4008941a, &(0x7f0000000080)) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2186.468308][ T7918] 8021q: adding VLAN 0 to HW filter on device bond1408 [ 2186.546608][ T7968] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2186.661384][ T7928] bridge1273: entered promiscuous mode [ 2186.672007][ T7928] bridge1273: entered allmulticast mode [ 2186.771481][ T7940] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2186.790809][ T7940] workqueue: Failed to create a rescuer kthread for wq "bond850": -EINTR [ 2186.899236][ T7962] netlink: 'syz-executor.3': attribute type 1 has an invalid length. 
10:42:03 executing program 5: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) writev(r0, &(0x7f0000000340)=[{&(0x7f0000000280)="043caa07f3a3f76d858ace87d769ded7550278376e8c4f6550d9108cbf5189deea33e10b8a7224c6107d53d01f904f58c8e83e60c4192c5a7fbd1a5f9c9fb63ca8dddee8d195355273926702d073752d80d8545e667b5c485a", 0x59}, {&(0x7f0000000100)="e48c29435b087869336f3e74363df6ea882508c8aa487ff564e800ccd724f50e3896c5d551b1a2ec6bb29187299fe2e06c11dfcb2075f9a839b34a4a6b4c2bec", 0x40}, {&(0x7f0000000400)="a318ece45becd6e47ff8375b816640bce6a9ff4d8e83ab791404d351c3ba957f2f3a0220176c17e899248cf03c8a908c4ade8de15d81e79b803df97f6fe5e817c2efde3befe034a7bc6d52a0c1caf88dc7d2077e40879876ce4aa8d6273615939a97526b8cb409d0c58f8075db16b3aed80bd44efb660be17d9db07a801a43b59a034e9998a5d60b072ab0fe5dd97a86e25568ab6d9d74c7af31171f38eaff0cb0fc24300b48212ec6608ef820fc964a28db4cd8d0526d550efac04bc60f4e4cd3c224cbe0ba45f28e0566930afbca59424e39854793c6e301fdc9cedeb00500db4af5f6c2004568aa4682fc656b1943bb63", 0xf2}, {&(0x7f0000000500)="a1a8c7128cf53b860b5aaa1ccee4ffe846420e0192cfed6fed4eeddf537179b0a83627c3868f3b78b60a61263a90957f69337b2ab0a123218ed0318185135c095b602df299adfc458eef4947ab34782ecdbae8436486efca9d9076f2316af18200a3014efba92706cc1748f7c5925bdaa3aa4516b6f1a6f22f4f1c23d2554620b90ad856298d3087e4519d78d14dd2d1dbb2401cfd330b19ac2fc26b2b4837023a585b846cdc0e007773719a6ad0cf3acb3cde81220e1909741522d75d5c49222f1d0ef6d8bd1b58070df04e658553cb734adbdb98c8482330ed25a1d53b5b3a6c09aa436715f3535c84a943", 0xec}], 0x4) accept(0xffffffffffffffff, 0x0, &(0x7f0000000080)) socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x0, 0x803, 0x0) syz_genetlink_get_family_id$mptcp(&(0x7f0000000000), r1) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r3, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r5, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0xea030000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x3c}}, 0x0) 10:42:03 executing program 2: r0 = bpf$ITER_CREATE(0x21, &(0x7f0000000200), 0x8) openat$cgroup_ro(r0, &(0x7f0000000040)='pids.current\x00', 0x275a, 0x0) (async) r1 = openat$cgroup_ro(r0, &(0x7f0000000040)='pids.current\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r1, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) bpf$BPF_BTF_LOAD(0x12, &(0x7f0000000180)={&(0x7f0000000080)={{0xeb9f, 0x1, 0x0, 0x18, 0x0, 0xc, 0xc, 0x3, [@func={0x7, 0x0, 0x0, 0xc, 0x5}]}, {0x0, [0x2e]}}, &(0x7f00000000c0)=""/178, 0x27, 0xb2, 0x1}, 0x20) 10:42:03 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) r1 = openat$cgroup_int(0xffffffffffffffff, &(0x7f0000000000)='cpuset.memory_migrate\x00', 0x2, 0x0) 
ioctl$BTRFS_IOC_SUBVOL_SETFLAGS(r1, 0x4008941a, &(0x7f0000000080)) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 10:42:03 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) r0 = accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r1 = socket$nl_generic(0x10, 0x3, 0x10) r2 = socket$nl_generic(0x10, 0x3, 0x10) ioctl$sock_SIOCGIFINDEX_80211(r1, 0x8933, &(0x7f0000000480)={'wlan1\x00', 0x0}) r4 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000180), 0xffffffffffffffff) sendmsg$NL80211_CMD_REMAIN_ON_CHANNEL(r2, &(0x7f0000000080)={0x0, 0x0, &(0x7f0000000100)={&(0x7f0000000040)=ANY=[@ANYBLOB=',\x00\x00\x00', @ANYRES16=r4, @ANYBLOB="01000000000000000000370f000008000300", @ANYRES32=r3, @ANYBLOB="080026006c0900000800570080"], 0x2c}}, 0x0) sendmsg$NL80211_CMD_FRAME(r0, &(0x7f0000000080)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x20}, 0xc, &(0x7f0000000040)={&(0x7f0000000400)={0x548, r4, 0x2, 0x70bd27, 0x25dfdbfb, {{}, {@void, @void}}, [@NL80211_ATTR_FRAME={0x4f2, 0x33, @reassoc_resp={@wo_ht={{0x0, 0x0, 0x3, 0x0, 0x0, 0x1, 0x0, 0x0, 0x1}, {0x1}, @device_b, @broadcast, @initial, {0x2, 0xb90}}, 0x4008, 0x31, @default, @void, @void, [{0xdd, 0xdd, "893bddf4f39fe23047d81def36c83ea7dc86cbe2f772c49c22fe49425e1088de8158ffa6463c6cb92125eb864dabe27eb9a255a02aa68cff2421aac59af58439cc2aea96e0da07554c5e9fe7993507e908e173468d8f854a9abf6e03bb5f333a1457712a32fd8e761ed8f3fefa388090ea0ac5c8c31a41af9bb575c8c7d1280985a91a9fc9e05a54be6c61f0b0d74ee93233d805a69957df477a33d9be27e721196fcfebcd8995ba3a3d7ffee28e15deb87b04ae17427585b1add813ef79fe81ef11953cf553ee115bb11eb2b7724aeac3a67b7f922f46abe0ac104e33"}, {0xdd, 0x4f, "d14dcc51ed6efd00750ed5aa5e4911bc07848c25253496e60b0cd2b3ca5b6f13f89915809d3074f057996254ffebbc830e31ed205e01cfebcc4df82980a796fe2b5bce11f53f33339dc3e71349b025"}, {0xdd, 0x11, "8412ce67132c319517c43df8abd762d417"}, {0xdd, 0xfe, "afa2dfa9b948e8520bfdfce1580237969efc0e6637978612a367ea7db0b6cc7d85d7dd928e070b9420e7769a549f657f9c4c6cf45d883df0abe51115ab78dafef03f1305dbcec17db22b8414358758c78f8ef8ca3779c2b9d5450610d2265fef70395ee8d0a706f2a032409e575aa19e20f3c9fec89cd2c03ff3d339eee7ba8365fdaf06da6f664ddc47d441edfaa9a5123c639a4dbf118ed32d17365381f26af2fe5c3daffd4c973357290e6066389f8c8eae15b4f4052b9fcf060f7b8059e46b4ca7413d103db7a83c2278e0f0db436b7620d98ceea5e40fa1235d11db1c713c746b4c34c0c4f23a4186caad6c8ca9021593a465322f6e1c3b74e465e5"}, {0xdd, 0xbd, "57130242ab769d9a05497e2b28585843152c3b0fa845362b9539d9d54ba11ac7e6d52b566d60c5b97fedb660a728901aa8d526d5ec179473a14075419bdbd139bd71577baa331f6f658866b6d61e66fe1460bdcb76c446b67f566fab16615f1ddced97b2d1afc9a3de2779575fb6e53a0bf8cdec8c06392db58ab2c88a5730143eccbdfcc3d4525129fe9802b201fc3301b225cea1418e5661f1eee1b7db04f9d0a8c48bfd9660770661f03bfa1a494a3a798f166e2433b94dc6f9f92e"}, {0xdd, 0xe, "c7cb2d1497831b0a574b0bbe92c3"}, {0xdd, 0x7a, "f97f542c893c39984db8b5d544ca54c7273e8b4461efced3489fdda30b631d1c207c9b6728e13a0b8f531b9ada8aa97823694ec47c402b23bf00098b1a46e11b63e6b50d07e5b0939e5effc859cd489baf8e8f0f9301ba3ac626fa344d71069333ce55d615f6c4e5dabcc755e75cc4b76bbd391e9d3d9df6bfc4"}, {0xdd, 0xc6, 
"effe6a53891cb8a3dff6d13aa0719626ce09ddbe1906b3c8cf43b360ae71e3ad005c2b43242f39e452318d5bd72f23dd3bbc179e86d4f8212721a34fdf522e0af37d8032294539b38038623fa910dcb0f23b46068ef3901ef3bada7627753a7f6b93f19be03e4eb76ca67d1733179aea8a83a9234a361d60c566faeb1e549bcd0fc124c594cae657c30fdd9139f2791628cc1922d3efbb7965840dd93ab35f7b5606b76175fe9cd2351de60a9c20cf0efd627959d07ae7ff5c38641aefb954396718588dd75d"}, {0xdd, 0x78, "1031333a81626f8693885ca0f6f3c4b022d30b7e4fdb0bc44468743b7d35f3a3a3dd98d80217a589e571e0678e1ac94f384baf882854fee971575bd1ef698db97811db25aa370b1246126d963492553ee43f5065c8c9ee6ae33518cdc24ce9f4657c8d36f913fd74e6325799928ffe9ed11e44bb7f58b4f7"}]}}, @NL80211_ATTR_CSA_C_OFFSETS_TX={0xc, 0xcd, [0xe9, 0x9, 0x6, 0x3f]}, @chandef_params=[@NL80211_ATTR_CENTER_FREQ1={0x8, 0xa0, 0x5}, @NL80211_ATTR_WIPHY_EDMG_BW_CONFIG={0x5, 0x119, 0xa}, @NL80211_ATTR_WIPHY_FREQ_OFFSET={0x8, 0x122, 0x6f}], @NL80211_ATTR_CSA_C_OFFSETS_TX={0xe, 0xcd, [0xff01, 0x1, 0xe, 0x2, 0xc98]}, @NL80211_ATTR_DONT_WAIT_FOR_ACK={0x4}, @NL80211_ATTR_CSA_C_OFFSETS_TX={0x6, 0xcd, [0xc58]}]}, 0x548}, 0x1, 0x0, 0x0, 0x4000004}, 0x1) r5 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r5, 0x0, 0x0) 10:42:03 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xea020000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2187.047871][ T7962] 8021q: adding VLAN 0 to HW filter on device bond1446 [ 2187.056440][ T7980] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2187.076512][ T7969] bridge1344: entered promiscuous mode [ 2187.082273][ T7969] bridge1344: entered allmulticast mode 10:42:03 executing program 2: r0 = bpf$ITER_CREATE(0x21, &(0x7f0000000200), 0x8) r1 = openat$cgroup_ro(r0, &(0x7f0000000040)='pids.current\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r1, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) bpf$BPF_BTF_LOAD(0x12, &(0x7f0000000180)={&(0x7f0000000080)={{0xeb9f, 0x1, 0x0, 0x18, 0x0, 0xc, 0xc, 0x3, [@func={0x7, 0x0, 0x0, 0xc, 0x5}]}, {0x0, [0x2e]}}, &(0x7f00000000c0)=""/178, 0x27, 0xb2, 0x1}, 0x20) bpf$ITER_CREATE(0x21, &(0x7f0000000200), 0x8) (async) openat$cgroup_ro(r0, &(0x7f0000000040)='pids.current\x00', 0x275a, 0x0) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r1, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) bpf$BPF_BTF_LOAD(0x12, &(0x7f0000000180)={&(0x7f0000000080)={{0xeb9f, 0x1, 0x0, 0x18, 0x0, 0xc, 0xc, 0x3, [@func={0x7, 0x0, 0x0, 0xc, 0x5}]}, {0x0, [0x2e]}}, &(0x7f00000000c0)=""/178, 0x27, 0xb2, 0x1}, 0x20) (async) 10:42:03 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r1 = openat$cgroup_int(0xffffffffffffffff, 
&(0x7f0000000000)='cpuset.memory_migrate\x00', 0x2, 0x0) ioctl$BTRFS_IOC_SUBVOL_SETFLAGS(r1, 0x4008941a, &(0x7f0000000080)) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2187.231752][ T7977] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 2187.259742][ T7992] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further 10:42:03 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xffffff9e}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:42:03 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r1 = socket$nl_xfrm(0x10, 0x3, 0x6) ioctl$EXT4_IOC_GROUP_EXTEND(r1, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) r2 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) sendmsg$NL80211_CMD_NEW_MPATH(r0, &(0x7f0000000440)={&(0x7f0000000180)={0x10, 0x0, 0x0, 0x10000}, 0xc, &(0x7f00000002c0)={&(0x7f00000003c0)={0x70, 0x0, 0xf8a7ca0f2a3df9b1, 0x70bd27, 0x25dfdbfc, {{}, {@val={0x8}, @val={0xc, 0x99, {0x40, 0x68}}}}, [@NL80211_ATTR_MPATH_NEXT_HOP={0xa, 0x1a, @broadcast}, @NL80211_ATTR_MPATH_NEXT_HOP={0xa, 0x1a, @broadcast}, @NL80211_ATTR_MAC={0xa, 0x6, @broadcast}, @NL80211_ATTR_MPATH_NEXT_HOP={0xa, 0x1a, @device_b}, @NL80211_ATTR_MAC={0xa, 0x6, @broadcast}, @NL80211_ATTR_MPATH_NEXT_HOP={0xa, 0x1a, @broadcast}]}, 0x70}, 0x1, 0x0, 0x0, 0x20044800}, 0x40004) r3 = openat$cgroup_ro(r2, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r3, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) ioctl$FS_IOC_RESVSP(r3, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r3, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) write$tun(r3, 0x0, 0x0) sendmsg$nl_xfrm(r3, &(0x7f0000000140)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x4}, 0xc, &(0x7f0000000100)={&(0x7f0000000080)=ANY=[@ANYBLOB="4c0000001f0010002dbd7000ffdbdf25e0000002000000000000000000000000000004d50a003300fe80000000000000000000000000005d07350000000100"/76], 0x4c}, 0x1, 0x0, 0x0, 0x804}, 0x2000c0c1) 10:42:03 executing program 4: r0 = 
openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000000)='blkio.throttle.io_serviced_recursive\x00', 0x275a, 0x0) readv(r0, &(0x7f0000000280)=[{&(0x7f0000000040)=""/233, 0xe9}, {&(0x7f0000000140)=""/94, 0x5e}, {&(0x7f00000001c0)=""/114, 0x72}, {&(0x7f0000000240)=""/50, 0x32}], 0x4) mmap(&(0x7f0000ffa000/0x3000)=nil, 0x3000, 0xa, 0x110, r0, 0xffa75000) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0x1f87f9b6) ioctl$BTRFS_IOC_INO_LOOKUP_USER(r0, 0xd000943e, &(0x7f00000002c0)={0x0, 0x0, "6fdc2659a13ec0f522902e8011a17ecf42109222a9dc210ffa2eb5176a136065325e1f8f1eecd84eede16798280562a09541b76c96e580fc958371f8926c91deb0ca1a4dd367b7e9fa951318aa01e20b5640aae27ba20e1bca56062b9843ea1672f9ee55fcb566f3319ffc6684a05e2cc529e1e64ce327434aee0771b8a5d7462c040334bfcd63183130d457522d249038ed6d7dfa93f1b1e6637bb61f70913793dbaf0ef439311c455499771925a6a5a6384a31e6528ef61d5a9518a63d7fb626e204b2dae1896b60a17aa570d3fe141e8ee7a373fbb7c6852f5c427c7bc0e0f80a80b5058f388c77b53aaf24fe39e877efa7206d3468fa9221ff69ece91c43", "2e1c8f7fe9679c33d9307a48b3d8855351c6c1d1fe427e217eb76967bb3b15b8bd728cbf2a0efae6b4248fd513868ca124942bcc1b59138251be9806d4054a53ea3704f50f706b73bdfb2487c746dc22018b3d160e08f44e4b010b8716a972b7255b4a88d0cbd92f43dcd329bc106fb6f7a97d255af7647b61bd15af14b387ced234ff9710161730aeaf621875c9fde2951fcd8e57583addb33392a6c869c2d2e29ba5f528933690be30726c17863b72100971963e9a4adad3a5316d34cff21600aeb45bb4fe5dd2f695e3de374f63a4db8922c039c448a668c971c2cd092d3232c7e18418744fe9d8749f1965400d4841e07de908d9fe4d9d60736c23c5204d15f2e0b2932dc85a92c4f0731dbaf141014419be4fad839f2353b532d7a0a6591b17d4292bf7c8f5ae0b5cdf2b18e665f189284d8bfaba1b23aacda193d237390d69c6ff6762fb972a87cd802852b87306d86ee4c22af8391f47e164f7a46c35c33f4c32389f36755fec7fdbca273b9560017c5983fe80f199563c385c1b30fed4e481cb133c5efda36620b544566af365b7b5fc2d948710c21b4140c93fe44ab6fcd7b689ed6b5388b5169dfc0813a54e84aac1c07e996128011a89de1175906fc56f64c7dd97b15f74035d335eb1d962e4d5f7a5a306cf8b9c6d290a3b529764a9ed3af169999ff896ee01fbba76ab2cc6b8936a7cef06094805d7ad41f01c3d7a11992e5d737667c20718135b0f52346780d6cfeec3e000cf44a799ffbea41be2ce841ef128290ff751c950658849cd17ce9bae5b530b712acb3cd835989c6a80c2b5c540356a4022e4b519827958a741d1c1928962658313a311ff9eb9e7909657584c5f5ed1699a8f943c7df8befc84104a75b7c0b42cc8543a16dfdff907edc1a6677de53515292b50bee9bc559200b09d2526beafbb3c6df4ba8ea9552a69adc56c9807450ce7a30987a661e5d6f313e5bf7d3bb755933c2c466fc03900980d7a8c9aaedd3dfc2ad7994dc86c3aecebca5b2c58f86f74231810bb2d840e617be60ebdd3b57eedb4e2f2081de8b6323844922c474dd38b3cb66abd8d5846d2bbe026a00cad367cb51d5ee8b30025ae1050347937461b98303ce806711000e9c335d52cc0ba75e3ad7bf6fca49a2e3ea30749669791f381b91ed27e97bb11af9be70ffe442b7f1144317de67eda7ce86aca8ec38ed3cab38f077a3ad08f4a87f364ef5ae3f4297486c0cbf1e0bb2ca954a706d0fe81251ecc6635e3d713d5d0ae3c423ead6fbf89a87f52fd44d0832f3a4da1229908dfe2cbbf22f622c9be5900180021bee83688bd95477302b02d994e70f065d1278ced4b9aadf4db252423264c85f5f39ba3d56e06538204213b7df290780c8b4733793587f3ca4b9d50bcb8fe06ba29fc5f8f29686e7baf923cc35da97b95ce96d0c02e7e22d653d4e48ee8c131fe80cf02e92839bf7e63dbddf0937b5fc00135af5006932fd47922c69513d82ecb2b97bb1ff6649b99e0d4f5d5e069d8dcfb4c56e30db8b6499409441b789571e3059028e283b5d94929c481c9420663fe044e7485e9f1726e02bb548e8298270139f596d1cca2628a8c04f6cb5ec352b23ae4c36e1b20c08a96fc9efa2d72992efa0e7877e81dccbd2ca42aa36b5d9795491e998bc681b4eb99af5146e5008dd78cd92ac398e4429c4eca0b901b92a2628641fcda99bd34015a924685f29bcf4e373bc11c818818a5b2d7aa356e9b0445aa2063f81085e6337c0b0efd24590e5d41a0715fc9f370af1fde9d0159d
d25c46ae538cc84e4f617d1dafce988ca4282dbda7d74cab62483bc2ac8f773a83f6db5d5633f166fa880b76374eb6b1f150d9d3f31da28a0e1f741b30eb15f6f0587c49a0bde370720c81eee3670bcef26ac917a3a0bc600c0d4314b3b7d5f0bc2b930d91b6599ed7366a270963daa69a670ac3aca29a348da256e276a65dfb7125f48558ec70db4a9d49b8cedba0034f0136cb0c870e9111c6424c43a420277d931c0ed8f3296bb9c63783e54c68b7b5bdcc1c930ddbc446733c61698bb1073c31caa568dbacb131e132b9952fff40bec8656b0b5839b65af15bd873b9a2df86bcdd05c5a8e83b66befb987a85871b0dab1b46aa90c7862822b04ff3b494ac57ba76621833d1fecea9379dd389e1959ffe3ada16ef562cb58f8c7fe744b59782f14aa68c7c80e78aa00a9e04dee44f99e330859aad2466af2ada1f3e0667ce42ca155d1b966611b6a2c94926fc6b065292f8323293546a582d05b5ab2931a3785f542623b6989e8d714a815d9243e910a428833c3449e586cd64280612ab8092aa8a55b8bd0bdd21c534734bd7d3e0d823df70374842ba84e89a89062ce23d810ec60c394d5289f5ab16d69e0b3d7745adfc2e8cf12e76f04c528fa0796ccee7a5f2a364e1ace2fb26aee1aa41b543875fc124028595d2e9daa182264b1c247bd276a1a1ed0a35bef45115b81690dc62765dc49d342ac4f749894e42b14b5094623d04ef66f8d2d4e1cdb55452049d89d078191e51c216188385d846d5430bbbc8525a97b08b397cdb28b0e8466e629c7664f6c844851271e5cd85315bb8a483f48536b52ba572c9d6eb8aff004bf8a416e41c631835ad1cb890b3293c9a4d5299314bbf0ae1a3dbd54aa58443a5e5cba60cfbfd6051fad6b933e700d4dcc7efebf17ec72d9b447d82439b66bcc49ce16c8e7d635b3719eca5d0656e61cec439447d802e2ab8b4c716c78de011ebe4a84a2679ace5ad5e9738cfca9e640bf992d8bb8dd4d69005bd5a736bf37b100735e2699f560f162a9542192588084d26698bd52a007350d0602393706dc64e5b25b2ad8338b07e2c0bf140a4c9d83e0e6f7e7536c7b69fd48b3ecd6e8aab68f10e508ed002bb3b4f5601432741156940cf4475d0e4bb02f4074b4d2b083625f1d421236da2c176d62ac8a3ac6a9b15e52f797c2a9fa08ffbc17d7e480ac876c64f9f3b3fc78ca1dda58a16ef4f93aca3b32a890c503506f4564446743d0ad22a3b9d63583b1527a723684521fa39db38d7064c89688e57ecaae15738c5f51466ab10619c2beb944a6f47a171a1de7b0766f6b9aa13d97cfdbcb5eef761c96811673176a013829510e1ccdc72a721b55433fc704a2045742adfe1ca2d5cd25d1c9dbb6be7b22ecc16d31172797e9efbb2b1b048224c61dc47ffc9c564fd6be9154a48fc1c62433de5c90abca99d5003df69ebbed6c08b4a73d712d7debff75f83bc6b9d0237f63d8020edbe6de7f77a50a70103e3b1afae2665f4e2fd78f658f0a0907888567a8dfcdeb8dd6366245aa1d45ac2d54abd0725ca8bee6bb30b67885b7d3e196eca7577354abbc0f956190b288c942a96c05f76cf634f7b41542126742b33a75e125b2f87ec2c9d4970169de28de80b08b64e94725c193d6858ac77bf4c0d14ddfd90c9c2786688948ea9a85b305726b7c15e6e215ad2a071fdd72f2e4b132cb2498689a64d791833fc16cabaa87d35ab052670e62c7250b65d0780fa30e86dba82405ae403c8db0229aa4d089f3a23155d6339a916bce926433bed1f403dbb8d837bc8edc03f23c6bc202269b7c92a472b07f50c90989cbb7934ac2980556bbc6de9d1bc96dd7632bbe0f25c67e971d8c80beaeaa459b1d8f80ea043009059e2d7da33688f1a7b05b61cc28c75ca78fbb75afe1062293b39b48f805471ddfb85c8e903270b20a843f109d6f6d0a3f5cffeaf00a602b9467c6247276bb75ca6d5c20fb578128a8080ba94f07afa0c28526106627ac50724c37f61eabf15fb9acc7f9c796d25f883c88e4dcbe13b669cb55df9623740b061511b44f93796601ea359af03e8483a12341ea18a59b7477361c94ddcddfcb1a623061642582df0b85ad2ac59d93b5e6c9a9185ee8746ba50fc026eeb3c3b4c8ed02680556d6a2b560eb997d2815a886eb7a15aa2e3d8ea4184fa81288edcc790fd3ecd8753e067ecbcdc4bf9c1d9033fce70cd689899da85a13088b9b376b43a589fd88eadc85faba0a4dedf0c5d1dd850d4bab0608b32ecbbcf2fc9fd3de407d6eedb670b88922ccc39ad1408cd0bb68b3d83e97c13589e86c03cb0260ee9e0af9374c63293d2dbfc0d8edfee6460913de6c2fe286f9b1e5673f875aa6b1ff376e9ab3590e3726292e05dd51b48386d592e21463581eb3647d3a556f54b81bb8f200699491f44d5673ac52ebcf301b9b76a267d75cbb106ef02d06e6727b38b76554525b59d5ea585cce8c5b8fa8a0b26399408d03029096e99771ea5f268c6b54709452a8
45b885e9cdc315392c3e11d79ceeb85b490e3276d17c92d63fafa5c8ec088ee07059c9b0b842775f7e4fb8d03f9882fd11612e1d34d22263439c99cd601c79980f78507e18b6ccd8c51b653f2dcd11de7eb74f903ee87e5b946441b8cc1d9f09e36b06027980d9a2c51bac40711352ed4a2d957d43eb9a49169c72fbfd9ad79c17f14fc7091e94703752c9c9085d12e1c15bb93f7f86974382649994c8705fec8c97df620b75104c4d615b983ac37bb5b5c7e29edee7e4734737211d8ae85c8038d60ff30d6cc6225f3d5ac99070d79fd1514c4163b7b3166397e2c0e918224d27296d8fec84880e1a844360b55b01fc1b5b1169c2f539ab02f5d8a9814432ecbcaf578fb12d444ae762144f302fa70912110e647ff1372140414fc6262f671f5d3444cc661000167862f190e192a30357a7755d8e8522427ae0801a35641a6208960e58e3d266d7490c46b22b4890e53d1f39776dcc7a80a1b21dce4483a4b494dfa2e90e66fe26e17fdf5762fa5a2eaefcbbae15016ea51cd5ef9819e751586b15fb593cdf092b7d13847fd28723744572055853a55600b3dcdbc651f33998dc7b65072c6f4c69210772e43fce79131a5854d15eab49ee8dcbd0484eab7b05d8828b7d701e74b52fb0ebf95b21a9910a204313be10ccb4c069379215f39a14cd9011242e629efa1f77764eefcc9f1cff0c90ac17462a390e52f3de140e693923448a164713aabd31d787babde59c51a0212c92e52082fd659f5515b9c5c09b566984bab76ac1673ddcb8f12efe8c2330cb2571d34d837c4e2fc845e23ceb79edb00344eae915cbe32d44195ac2681cf29f80a6cacd318d63b461b7467ef1d69ec308063a8306a6beb28ab9bcfddfeec2c7567365a03d3dd68748cc5ff1dacb8878ad74f18c61edf5d4a52e319ce463b99a33302c5e424cc816e0e49b4c1f33ed96f11e0e207d1c1b181c099462d2b84f2b0ca3f0aacbe0ce62c03e01dfd85d7f215acd52374e0e0a390c415ca099c6336dffce680b688a52a1ace99433bec18359e84c32528003068a3240ef28d55b6f86cf90abc6a6205eb80c3ebe4fb82bb4de6e9c222793789a69ea"}) 10:42:03 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) (async) mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) r0 = accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) (async) r1 = socket$nl_generic(0x10, 0x3, 0x10) r2 = socket$nl_generic(0x10, 0x3, 0x10) ioctl$sock_SIOCGIFINDEX_80211(r1, 0x8933, &(0x7f0000000480)={'wlan1\x00', 0x0}) r4 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000180), 0xffffffffffffffff) sendmsg$NL80211_CMD_REMAIN_ON_CHANNEL(r2, &(0x7f0000000080)={0x0, 0x0, &(0x7f0000000100)={&(0x7f0000000040)=ANY=[@ANYBLOB=',\x00\x00\x00', @ANYRES16=r4, @ANYBLOB="01000000000000000000370f000008000300", @ANYRES32=r3, @ANYBLOB="080026006c0900000800570080"], 0x2c}}, 0x0) sendmsg$NL80211_CMD_FRAME(r0, &(0x7f0000000080)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x20}, 0xc, &(0x7f0000000040)={&(0x7f0000000400)={0x548, r4, 0x2, 0x70bd27, 0x25dfdbfb, {{}, {@void, @void}}, [@NL80211_ATTR_FRAME={0x4f2, 0x33, @reassoc_resp={@wo_ht={{0x0, 0x0, 0x3, 0x0, 0x0, 0x1, 0x0, 0x0, 0x1}, {0x1}, @device_b, @broadcast, @initial, {0x2, 0xb90}}, 0x4008, 0x31, @default, @void, @void, [{0xdd, 0xdd, "893bddf4f39fe23047d81def36c83ea7dc86cbe2f772c49c22fe49425e1088de8158ffa6463c6cb92125eb864dabe27eb9a255a02aa68cff2421aac59af58439cc2aea96e0da07554c5e9fe7993507e908e173468d8f854a9abf6e03bb5f333a1457712a32fd8e761ed8f3fefa388090ea0ac5c8c31a41af9bb575c8c7d1280985a91a9fc9e05a54be6c61f0b0d74ee93233d805a69957df477a33d9be27e721196fcfebcd8995ba3a3d7ffee28e15deb87b04ae17427585b1add813ef79fe81ef11953cf553ee115bb11eb2b7724aeac3a67b7f922f46abe0ac104e33"}, {0xdd, 0x4f, 
"d14dcc51ed6efd00750ed5aa5e4911bc07848c25253496e60b0cd2b3ca5b6f13f89915809d3074f057996254ffebbc830e31ed205e01cfebcc4df82980a796fe2b5bce11f53f33339dc3e71349b025"}, {0xdd, 0x11, "8412ce67132c319517c43df8abd762d417"}, {0xdd, 0xfe, "afa2dfa9b948e8520bfdfce1580237969efc0e6637978612a367ea7db0b6cc7d85d7dd928e070b9420e7769a549f657f9c4c6cf45d883df0abe51115ab78dafef03f1305dbcec17db22b8414358758c78f8ef8ca3779c2b9d5450610d2265fef70395ee8d0a706f2a032409e575aa19e20f3c9fec89cd2c03ff3d339eee7ba8365fdaf06da6f664ddc47d441edfaa9a5123c639a4dbf118ed32d17365381f26af2fe5c3daffd4c973357290e6066389f8c8eae15b4f4052b9fcf060f7b8059e46b4ca7413d103db7a83c2278e0f0db436b7620d98ceea5e40fa1235d11db1c713c746b4c34c0c4f23a4186caad6c8ca9021593a465322f6e1c3b74e465e5"}, {0xdd, 0xbd, "57130242ab769d9a05497e2b28585843152c3b0fa845362b9539d9d54ba11ac7e6d52b566d60c5b97fedb660a728901aa8d526d5ec179473a14075419bdbd139bd71577baa331f6f658866b6d61e66fe1460bdcb76c446b67f566fab16615f1ddced97b2d1afc9a3de2779575fb6e53a0bf8cdec8c06392db58ab2c88a5730143eccbdfcc3d4525129fe9802b201fc3301b225cea1418e5661f1eee1b7db04f9d0a8c48bfd9660770661f03bfa1a494a3a798f166e2433b94dc6f9f92e"}, {0xdd, 0xe, "c7cb2d1497831b0a574b0bbe92c3"}, {0xdd, 0x7a, "f97f542c893c39984db8b5d544ca54c7273e8b4461efced3489fdda30b631d1c207c9b6728e13a0b8f531b9ada8aa97823694ec47c402b23bf00098b1a46e11b63e6b50d07e5b0939e5effc859cd489baf8e8f0f9301ba3ac626fa344d71069333ce55d615f6c4e5dabcc755e75cc4b76bbd391e9d3d9df6bfc4"}, {0xdd, 0xc6, "effe6a53891cb8a3dff6d13aa0719626ce09ddbe1906b3c8cf43b360ae71e3ad005c2b43242f39e452318d5bd72f23dd3bbc179e86d4f8212721a34fdf522e0af37d8032294539b38038623fa910dcb0f23b46068ef3901ef3bada7627753a7f6b93f19be03e4eb76ca67d1733179aea8a83a9234a361d60c566faeb1e549bcd0fc124c594cae657c30fdd9139f2791628cc1922d3efbb7965840dd93ab35f7b5606b76175fe9cd2351de60a9c20cf0efd627959d07ae7ff5c38641aefb954396718588dd75d"}, {0xdd, 0x78, "1031333a81626f8693885ca0f6f3c4b022d30b7e4fdb0bc44468743b7d35f3a3a3dd98d80217a589e571e0678e1ac94f384baf882854fee971575bd1ef698db97811db25aa370b1246126d963492553ee43f5065c8c9ee6ae33518cdc24ce9f4657c8d36f913fd74e6325799928ffe9ed11e44bb7f58b4f7"}]}}, @NL80211_ATTR_CSA_C_OFFSETS_TX={0xc, 0xcd, [0xe9, 0x9, 0x6, 0x3f]}, @chandef_params=[@NL80211_ATTR_CENTER_FREQ1={0x8, 0xa0, 0x5}, @NL80211_ATTR_WIPHY_EDMG_BW_CONFIG={0x5, 0x119, 0xa}, @NL80211_ATTR_WIPHY_FREQ_OFFSET={0x8, 0x122, 0x6f}], @NL80211_ATTR_CSA_C_OFFSETS_TX={0xe, 0xcd, [0xff01, 0x1, 0xe, 0x2, 0xc98]}, @NL80211_ATTR_DONT_WAIT_FOR_ACK={0x4}, @NL80211_ATTR_CSA_C_OFFSETS_TX={0x6, 0xcd, [0xc58]}]}, 0x548}, 0x1, 0x0, 0x0, 0x4000004}, 0x1) (async) sendmsg$NL80211_CMD_FRAME(r0, &(0x7f0000000080)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x20}, 0xc, &(0x7f0000000040)={&(0x7f0000000400)={0x548, r4, 0x2, 0x70bd27, 0x25dfdbfb, {{}, {@void, @void}}, [@NL80211_ATTR_FRAME={0x4f2, 0x33, @reassoc_resp={@wo_ht={{0x0, 0x0, 0x3, 0x0, 0x0, 0x1, 0x0, 0x0, 0x1}, {0x1}, @device_b, @broadcast, @initial, {0x2, 0xb90}}, 0x4008, 0x31, @default, @void, @void, [{0xdd, 0xdd, "893bddf4f39fe23047d81def36c83ea7dc86cbe2f772c49c22fe49425e1088de8158ffa6463c6cb92125eb864dabe27eb9a255a02aa68cff2421aac59af58439cc2aea96e0da07554c5e9fe7993507e908e173468d8f854a9abf6e03bb5f333a1457712a32fd8e761ed8f3fefa388090ea0ac5c8c31a41af9bb575c8c7d1280985a91a9fc9e05a54be6c61f0b0d74ee93233d805a69957df477a33d9be27e721196fcfebcd8995ba3a3d7ffee28e15deb87b04ae17427585b1add813ef79fe81ef11953cf553ee115bb11eb2b7724aeac3a67b7f922f46abe0ac104e33"}, {0xdd, 0x4f, 
"d14dcc51ed6efd00750ed5aa5e4911bc07848c25253496e60b0cd2b3ca5b6f13f89915809d3074f057996254ffebbc830e31ed205e01cfebcc4df82980a796fe2b5bce11f53f33339dc3e71349b025"}, {0xdd, 0x11, "8412ce67132c319517c43df8abd762d417"}, {0xdd, 0xfe, "afa2dfa9b948e8520bfdfce1580237969efc0e6637978612a367ea7db0b6cc7d85d7dd928e070b9420e7769a549f657f9c4c6cf45d883df0abe51115ab78dafef03f1305dbcec17db22b8414358758c78f8ef8ca3779c2b9d5450610d2265fef70395ee8d0a706f2a032409e575aa19e20f3c9fec89cd2c03ff3d339eee7ba8365fdaf06da6f664ddc47d441edfaa9a5123c639a4dbf118ed32d17365381f26af2fe5c3daffd4c973357290e6066389f8c8eae15b4f4052b9fcf060f7b8059e46b4ca7413d103db7a83c2278e0f0db436b7620d98ceea5e40fa1235d11db1c713c746b4c34c0c4f23a4186caad6c8ca9021593a465322f6e1c3b74e465e5"}, {0xdd, 0xbd, "57130242ab769d9a05497e2b28585843152c3b0fa845362b9539d9d54ba11ac7e6d52b566d60c5b97fedb660a728901aa8d526d5ec179473a14075419bdbd139bd71577baa331f6f658866b6d61e66fe1460bdcb76c446b67f566fab16615f1ddced97b2d1afc9a3de2779575fb6e53a0bf8cdec8c06392db58ab2c88a5730143eccbdfcc3d4525129fe9802b201fc3301b225cea1418e5661f1eee1b7db04f9d0a8c48bfd9660770661f03bfa1a494a3a798f166e2433b94dc6f9f92e"}, {0xdd, 0xe, "c7cb2d1497831b0a574b0bbe92c3"}, {0xdd, 0x7a, "f97f542c893c39984db8b5d544ca54c7273e8b4461efced3489fdda30b631d1c207c9b6728e13a0b8f531b9ada8aa97823694ec47c402b23bf00098b1a46e11b63e6b50d07e5b0939e5effc859cd489baf8e8f0f9301ba3ac626fa344d71069333ce55d615f6c4e5dabcc755e75cc4b76bbd391e9d3d9df6bfc4"}, {0xdd, 0xc6, "effe6a53891cb8a3dff6d13aa0719626ce09ddbe1906b3c8cf43b360ae71e3ad005c2b43242f39e452318d5bd72f23dd3bbc179e86d4f8212721a34fdf522e0af37d8032294539b38038623fa910dcb0f23b46068ef3901ef3bada7627753a7f6b93f19be03e4eb76ca67d1733179aea8a83a9234a361d60c566faeb1e549bcd0fc124c594cae657c30fdd9139f2791628cc1922d3efbb7965840dd93ab35f7b5606b76175fe9cd2351de60a9c20cf0efd627959d07ae7ff5c38641aefb954396718588dd75d"}, {0xdd, 0x78, "1031333a81626f8693885ca0f6f3c4b022d30b7e4fdb0bc44468743b7d35f3a3a3dd98d80217a589e571e0678e1ac94f384baf882854fee971575bd1ef698db97811db25aa370b1246126d963492553ee43f5065c8c9ee6ae33518cdc24ce9f4657c8d36f913fd74e6325799928ffe9ed11e44bb7f58b4f7"}]}}, @NL80211_ATTR_CSA_C_OFFSETS_TX={0xc, 0xcd, [0xe9, 0x9, 0x6, 0x3f]}, @chandef_params=[@NL80211_ATTR_CENTER_FREQ1={0x8, 0xa0, 0x5}, @NL80211_ATTR_WIPHY_EDMG_BW_CONFIG={0x5, 0x119, 0xa}, @NL80211_ATTR_WIPHY_FREQ_OFFSET={0x8, 0x122, 0x6f}], @NL80211_ATTR_CSA_C_OFFSETS_TX={0xe, 0xcd, [0xff01, 0x1, 0xe, 0x2, 0xc98]}, @NL80211_ATTR_DONT_WAIT_FOR_ACK={0x4}, @NL80211_ATTR_CSA_C_OFFSETS_TX={0x6, 0xcd, [0xc58]}]}, 0x548}, 0x1, 0x0, 0x0, 0x4000004}, 0x1) socket$netlink(0x10, 0x3, 0x0) (async) r5 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r5, 0x0, 0x0) 10:42:03 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r1 = socket$nl_xfrm(0x10, 0x3, 0x6) ioctl$EXT4_IOC_GROUP_EXTEND(r1, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r1, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) r2 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) sendmsg$NL80211_CMD_NEW_MPATH(r0, &(0x7f0000000440)={&(0x7f0000000180)={0x10, 0x0, 0x0, 0x10000}, 0xc, &(0x7f00000002c0)={&(0x7f00000003c0)={0x70, 0x0, 0xf8a7ca0f2a3df9b1, 0x70bd27, 0x25dfdbfc, {{}, {@val={0x8}, @val={0xc, 0x99, {0x40, 0x68}}}}, [@NL80211_ATTR_MPATH_NEXT_HOP={0xa, 0x1a, @broadcast}, @NL80211_ATTR_MPATH_NEXT_HOP={0xa, 0x1a, @broadcast}, 
@NL80211_ATTR_MAC={0xa, 0x6, @broadcast}, @NL80211_ATTR_MPATH_NEXT_HOP={0xa, 0x1a, @device_b}, @NL80211_ATTR_MAC={0xa, 0x6, @broadcast}, @NL80211_ATTR_MPATH_NEXT_HOP={0xa, 0x1a, @broadcast}]}, 0x70}, 0x1, 0x0, 0x0, 0x20044800}, 0x40004) r3 = openat$cgroup_ro(r2, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r3, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) (async) ioctl$FS_IOC_RESVSP(r3, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) ioctl$FS_IOC_RESVSP(r3, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) (async) ioctl$FS_IOC_RESVSP(r3, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r3, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) write$tun(r3, 0x0, 0x0) sendmsg$nl_xfrm(r3, &(0x7f0000000140)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x4}, 0xc, &(0x7f0000000100)={&(0x7f0000000080)=ANY=[@ANYBLOB="4c0000001f0010002dbd7000ffdbdf25e0000002000000000000000000000000000004d50a003300fe80000000000000000000000000005d07350000000100"/76], 0x4c}, 0x1, 0x0, 0x0, 0x804}, 0x2000c0c1) [ 2187.385110][ T7977] 8021q: adding VLAN 0 to HW filter on device bond1409 [ 2187.510391][ T8007] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2187.645163][ T7985] bond1409: (slave bridge1273): making interface the new active one [ 2187.671315][ T7985] bond1409: (slave bridge1273): Enslaving as an active interface with an up link [ 2187.714553][ T7999] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
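
[editor's note, not part of the captured log] The repeated "EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further" lines above correspond to the ioctl$EXT4_IOC_GROUP_EXTEND(..., 0x40086607, ...) calls in the reproducers: EXT4_IOC_GROUP_EXTEND is _IOW('f', 7, unsigned long) (0x40086607 on 64-bit) and takes the requested new block count. A minimal C sketch of that single call follows, under the assumption that the fd only needs to live on the ext4 filesystem being fuzzed; the path is a placeholder and the block-count value is copied from the log.

    /* Hedged sketch of the ioctl issued by the reproducers above.
     * The path is hypothetical; new_blocks is the value seen in the log. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/ioctl.h>

    #ifndef EXT4_IOC_GROUP_EXTEND
    #define EXT4_IOC_GROUP_EXTEND _IOW('f', 7, unsigned long) /* 0x40086607 on 64-bit */
    #endif

    int main(void)
    {
        int fd = open("/mnt/ext4/file", O_RDONLY);        /* placeholder path on an ext4 mount */
        unsigned long new_blocks = 0xfffffffffffffbffUL;  /* block count taken from the log */

        if (fd < 0) {
            perror("open");
            return 1;
        }
        /* With an oversized request on this setup, ext4 logs the
         * "need to use ext2online to resize further" warning seen above. */
        if (ioctl(fd, EXT4_IOC_GROUP_EXTEND, &new_blocks) < 0)
            perror("EXT4_IOC_GROUP_EXTEND");
        return 0;
    }
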
10:42:03 executing program 5: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) writev(r0, &(0x7f0000000340)=[{&(0x7f0000000280)="043caa07f3a3f76d858ace87d769ded7550278376e8c4f6550d9108cbf5189deea33e10b8a7224c6107d53d01f904f58c8e83e60c4192c5a7fbd1a5f9c9fb63ca8dddee8d195355273926702d073752d80d8545e667b5c485a", 0x59}, {&(0x7f0000000100)="e48c29435b087869336f3e74363df6ea882508c8aa487ff564e800ccd724f50e3896c5d551b1a2ec6bb29187299fe2e06c11dfcb2075f9a839b34a4a6b4c2bec", 0x40}, {&(0x7f0000000400)="a318ece45becd6e47ff8375b816640bce6a9ff4d8e83ab791404d351c3ba957f2f3a0220176c17e899248cf03c8a908c4ade8de15d81e79b803df97f6fe5e817c2efde3befe034a7bc6d52a0c1caf88dc7d2077e40879876ce4aa8d6273615939a97526b8cb409d0c58f8075db16b3aed80bd44efb660be17d9db07a801a43b59a034e9998a5d60b072ab0fe5dd97a86e25568ab6d9d74c7af31171f38eaff0cb0fc24300b48212ec6608ef820fc964a28db4cd8d0526d550efac04bc60f4e4cd3c224cbe0ba45f28e0566930afbca59424e39854793c6e301fdc9cedeb00500db4af5f6c2004568aa4682fc656b1943bb63", 0xf2}, {&(0x7f0000000500)="a1a8c7128cf53b860b5aaa1ccee4ffe846420e0192cfed6fed4eeddf537179b0a83627c3868f3b78b60a61263a90957f69337b2ab0a123218ed0318185135c095b602df299adfc458eef4947ab34782ecdbae8436486efca9d9076f2316af18200a3014efba92706cc1748f7c5925bdaa3aa4516b6f1a6f22f4f1c23d2554620b90ad856298d3087e4519d78d14dd2d1dbb2401cfd330b19ac2fc26b2b4837023a585b846cdc0e007773719a6ad0cf3acb3cde81220e1909741522d75d5c49222f1d0ef6d8bd1b58070df04e658553cb734adbdb98c8482330ed25a1d53b5b3a6c09aa436715f3535c84a943", 0xec}], 0x4) accept(0xffffffffffffffff, 0x0, &(0x7f0000000080)) socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x0, 0x803, 0x0) syz_genetlink_get_family_id$mptcp(&(0x7f0000000000), r1) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r3, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r5, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0xeb160000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x3c}}, 0x0) 10:42:03 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) r1 = socket$nl_xfrm(0x10, 0x3, 0x6) ioctl$EXT4_IOC_GROUP_EXTEND(r1, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) r2 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) sendmsg$NL80211_CMD_NEW_MPATH(r0, &(0x7f0000000440)={&(0x7f0000000180)={0x10, 0x0, 0x0, 0x10000}, 0xc, &(0x7f00000002c0)={&(0x7f00000003c0)={0x70, 0x0, 0xf8a7ca0f2a3df9b1, 0x70bd27, 0x25dfdbfc, {{}, {@val={0x8}, @val={0xc, 0x99, {0x40, 0x68}}}}, [@NL80211_ATTR_MPATH_NEXT_HOP={0xa, 0x1a, @broadcast}, @NL80211_ATTR_MPATH_NEXT_HOP={0xa, 0x1a, @broadcast}, @NL80211_ATTR_MAC={0xa, 0x6, @broadcast}, 
@NL80211_ATTR_MPATH_NEXT_HOP={0xa, 0x1a, @device_b}, @NL80211_ATTR_MAC={0xa, 0x6, @broadcast}, @NL80211_ATTR_MPATH_NEXT_HOP={0xa, 0x1a, @broadcast}]}, 0x70}, 0x1, 0x0, 0x0, 0x20044800}, 0x40004) (async) r3 = openat$cgroup_ro(r2, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r3, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) ioctl$FS_IOC_RESVSP(r3, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) (async) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r3, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) (async) write$tun(r3, 0x0, 0x0) (async) sendmsg$nl_xfrm(r3, &(0x7f0000000140)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x4}, 0xc, &(0x7f0000000100)={&(0x7f0000000080)=ANY=[@ANYBLOB="4c0000001f0010002dbd7000ffdbdf25e0000002000000000000000000000000000004d50a003300fe80000000000000000000000000005d07350000000100"/76], 0x4c}, 0x1, 0x0, 0x0, 0x804}, 0x2000c0c1) 10:42:03 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000000)='blkio.throttle.io_serviced_recursive\x00', 0x275a, 0x0) readv(r0, &(0x7f0000000280)=[{&(0x7f0000000040)=""/233, 0xe9}, {&(0x7f0000000140)=""/94, 0x5e}, {&(0x7f00000001c0)=""/114, 0x72}, {&(0x7f0000000240)=""/50, 0x32}], 0x4) mmap(&(0x7f0000ffa000/0x3000)=nil, 0x3000, 0xa, 0x110, r0, 0xffa75000) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0x1f87f9b6) ioctl$BTRFS_IOC_INO_LOOKUP_USER(r0, 0xd000943e, &(0x7f00000002c0)={0x0, 0x0, "6fdc2659a13ec0f522902e8011a17ecf42109222a9dc210ffa2eb5176a136065325e1f8f1eecd84eede16798280562a09541b76c96e580fc958371f8926c91deb0ca1a4dd367b7e9fa951318aa01e20b5640aae27ba20e1bca56062b9843ea1672f9ee55fcb566f3319ffc6684a05e2cc529e1e64ce327434aee0771b8a5d7462c040334bfcd63183130d457522d249038ed6d7dfa93f1b1e6637bb61f70913793dbaf0ef439311c455499771925a6a5a6384a31e6528ef61d5a9518a63d7fb626e204b2dae1896b60a17aa570d3fe141e8ee7a373fbb7c6852f5c427c7bc0e0f80a80b5058f388c77b53aaf24fe39e877efa7206d3468fa9221ff69ece91c43", 
"2e1c8f7fe9679c33d9307a48b3d8855351c6c1d1fe427e217eb76967bb3b15b8bd728cbf2a0efae6b4248fd513868ca124942bcc1b59138251be9806d4054a53ea3704f50f706b73bdfb2487c746dc22018b3d160e08f44e4b010b8716a972b7255b4a88d0cbd92f43dcd329bc106fb6f7a97d255af7647b61bd15af14b387ced234ff9710161730aeaf621875c9fde2951fcd8e57583addb33392a6c869c2d2e29ba5f528933690be30726c17863b72100971963e9a4adad3a5316d34cff21600aeb45bb4fe5dd2f695e3de374f63a4db8922c039c448a668c971c2cd092d3232c7e18418744fe9d8749f1965400d4841e07de908d9fe4d9d60736c23c5204d15f2e0b2932dc85a92c4f0731dbaf141014419be4fad839f2353b532d7a0a6591b17d4292bf7c8f5ae0b5cdf2b18e665f189284d8bfaba1b23aacda193d237390d69c6ff6762fb972a87cd802852b87306d86ee4c22af8391f47e164f7a46c35c33f4c32389f36755fec7fdbca273b9560017c5983fe80f199563c385c1b30fed4e481cb133c5efda36620b544566af365b7b5fc2d948710c21b4140c93fe44ab6fcd7b689ed6b5388b5169dfc0813a54e84aac1c07e996128011a89de1175906fc56f64c7dd97b15f74035d335eb1d962e4d5f7a5a306cf8b9c6d290a3b529764a9ed3af169999ff896ee01fbba76ab2cc6b8936a7cef06094805d7ad41f01c3d7a11992e5d737667c20718135b0f52346780d6cfeec3e000cf44a799ffbea41be2ce841ef128290ff751c950658849cd17ce9bae5b530b712acb3cd835989c6a80c2b5c540356a4022e4b519827958a741d1c1928962658313a311ff9eb9e7909657584c5f5ed1699a8f943c7df8befc84104a75b7c0b42cc8543a16dfdff907edc1a6677de53515292b50bee9bc559200b09d2526beafbb3c6df4ba8ea9552a69adc56c9807450ce7a30987a661e5d6f313e5bf7d3bb755933c2c466fc03900980d7a8c9aaedd3dfc2ad7994dc86c3aecebca5b2c58f86f74231810bb2d840e617be60ebdd3b57eedb4e2f2081de8b6323844922c474dd38b3cb66abd8d5846d2bbe026a00cad367cb51d5ee8b30025ae1050347937461b98303ce806711000e9c335d52cc0ba75e3ad7bf6fca49a2e3ea30749669791f381b91ed27e97bb11af9be70ffe442b7f1144317de67eda7ce86aca8ec38ed3cab38f077a3ad08f4a87f364ef5ae3f4297486c0cbf1e0bb2ca954a706d0fe81251ecc6635e3d713d5d0ae3c423ead6fbf89a87f52fd44d0832f3a4da1229908dfe2cbbf22f622c9be5900180021bee83688bd95477302b02d994e70f065d1278ced4b9aadf4db252423264c85f5f39ba3d56e06538204213b7df290780c8b4733793587f3ca4b9d50bcb8fe06ba29fc5f8f29686e7baf923cc35da97b95ce96d0c02e7e22d653d4e48ee8c131fe80cf02e92839bf7e63dbddf0937b5fc00135af5006932fd47922c69513d82ecb2b97bb1ff6649b99e0d4f5d5e069d8dcfb4c56e30db8b6499409441b789571e3059028e283b5d94929c481c9420663fe044e7485e9f1726e02bb548e8298270139f596d1cca2628a8c04f6cb5ec352b23ae4c36e1b20c08a96fc9efa2d72992efa0e7877e81dccbd2ca42aa36b5d9795491e998bc681b4eb99af5146e5008dd78cd92ac398e4429c4eca0b901b92a2628641fcda99bd34015a924685f29bcf4e373bc11c818818a5b2d7aa356e9b0445aa2063f81085e6337c0b0efd24590e5d41a0715fc9f370af1fde9d0159dd25c46ae538cc84e4f617d1dafce988ca4282dbda7d74cab62483bc2ac8f773a83f6db5d5633f166fa880b76374eb6b1f150d9d3f31da28a0e1f741b30eb15f6f0587c49a0bde370720c81eee3670bcef26ac917a3a0bc600c0d4314b3b7d5f0bc2b930d91b6599ed7366a270963daa69a670ac3aca29a348da256e276a65dfb7125f48558ec70db4a9d49b8cedba0034f0136cb0c870e9111c6424c43a420277d931c0ed8f3296bb9c63783e54c68b7b5bdcc1c930ddbc446733c61698bb1073c31caa568dbacb131e132b9952fff40bec8656b0b5839b65af15bd873b9a2df86bcdd05c5a8e83b66befb987a85871b0dab1b46aa90c7862822b04ff3b494ac57ba76621833d1fecea9379dd389e1959ffe3ada16ef562cb58f8c7fe744b59782f14aa68c7c80e78aa00a9e04dee44f99e330859aad2466af2ada1f3e0667ce42ca155d1b966611b6a2c94926fc6b065292f8323293546a582d05b5ab2931a3785f542623b6989e8d714a815d9243e910a428833c3449e586cd64280612ab8092aa8a55b8bd0bdd21c534734bd7d3e0d823df70374842ba84e89a89062ce23d810ec60c394d5289f5ab16d69e0b3d7745adfc2e8cf12e76f04c528fa0796ccee7a5f2a364e1ace2fb26aee1aa41b543875fc124028595d2e9daa182264b1c247bd276a1a1ed0a35bef45115b81690dc62765dc49d342ac4f749894e42b
14b5094623d04ef66f8d2d4e1cdb55452049d89d078191e51c216188385d846d5430bbbc8525a97b08b397cdb28b0e8466e629c7664f6c844851271e5cd85315bb8a483f48536b52ba572c9d6eb8aff004bf8a416e41c631835ad1cb890b3293c9a4d5299314bbf0ae1a3dbd54aa58443a5e5cba60cfbfd6051fad6b933e700d4dcc7efebf17ec72d9b447d82439b66bcc49ce16c8e7d635b3719eca5d0656e61cec439447d802e2ab8b4c716c78de011ebe4a84a2679ace5ad5e9738cfca9e640bf992d8bb8dd4d69005bd5a736bf37b100735e2699f560f162a9542192588084d26698bd52a007350d0602393706dc64e5b25b2ad8338b07e2c0bf140a4c9d83e0e6f7e7536c7b69fd48b3ecd6e8aab68f10e508ed002bb3b4f5601432741156940cf4475d0e4bb02f4074b4d2b083625f1d421236da2c176d62ac8a3ac6a9b15e52f797c2a9fa08ffbc17d7e480ac876c64f9f3b3fc78ca1dda58a16ef4f93aca3b32a890c503506f4564446743d0ad22a3b9d63583b1527a723684521fa39db38d7064c89688e57ecaae15738c5f51466ab10619c2beb944a6f47a171a1de7b0766f6b9aa13d97cfdbcb5eef761c96811673176a013829510e1ccdc72a721b55433fc704a2045742adfe1ca2d5cd25d1c9dbb6be7b22ecc16d31172797e9efbb2b1b048224c61dc47ffc9c564fd6be9154a48fc1c62433de5c90abca99d5003df69ebbed6c08b4a73d712d7debff75f83bc6b9d0237f63d8020edbe6de7f77a50a70103e3b1afae2665f4e2fd78f658f0a0907888567a8dfcdeb8dd6366245aa1d45ac2d54abd0725ca8bee6bb30b67885b7d3e196eca7577354abbc0f956190b288c942a96c05f76cf634f7b41542126742b33a75e125b2f87ec2c9d4970169de28de80b08b64e94725c193d6858ac77bf4c0d14ddfd90c9c2786688948ea9a85b305726b7c15e6e215ad2a071fdd72f2e4b132cb2498689a64d791833fc16cabaa87d35ab052670e62c7250b65d0780fa30e86dba82405ae403c8db0229aa4d089f3a23155d6339a916bce926433bed1f403dbb8d837bc8edc03f23c6bc202269b7c92a472b07f50c90989cbb7934ac2980556bbc6de9d1bc96dd7632bbe0f25c67e971d8c80beaeaa459b1d8f80ea043009059e2d7da33688f1a7b05b61cc28c75ca78fbb75afe1062293b39b48f805471ddfb85c8e903270b20a843f109d6f6d0a3f5cffeaf00a602b9467c6247276bb75ca6d5c20fb578128a8080ba94f07afa0c28526106627ac50724c37f61eabf15fb9acc7f9c796d25f883c88e4dcbe13b669cb55df9623740b061511b44f93796601ea359af03e8483a12341ea18a59b7477361c94ddcddfcb1a623061642582df0b85ad2ac59d93b5e6c9a9185ee8746ba50fc026eeb3c3b4c8ed02680556d6a2b560eb997d2815a886eb7a15aa2e3d8ea4184fa81288edcc790fd3ecd8753e067ecbcdc4bf9c1d9033fce70cd689899da85a13088b9b376b43a589fd88eadc85faba0a4dedf0c5d1dd850d4bab0608b32ecbbcf2fc9fd3de407d6eedb670b88922ccc39ad1408cd0bb68b3d83e97c13589e86c03cb0260ee9e0af9374c63293d2dbfc0d8edfee6460913de6c2fe286f9b1e5673f875aa6b1ff376e9ab3590e3726292e05dd51b48386d592e21463581eb3647d3a556f54b81bb8f200699491f44d5673ac52ebcf301b9b76a267d75cbb106ef02d06e6727b38b76554525b59d5ea585cce8c5b8fa8a0b26399408d03029096e99771ea5f268c6b54709452a845b885e9cdc315392c3e11d79ceeb85b490e3276d17c92d63fafa5c8ec088ee07059c9b0b842775f7e4fb8d03f9882fd11612e1d34d22263439c99cd601c79980f78507e18b6ccd8c51b653f2dcd11de7eb74f903ee87e5b946441b8cc1d9f09e36b06027980d9a2c51bac40711352ed4a2d957d43eb9a49169c72fbfd9ad79c17f14fc7091e94703752c9c9085d12e1c15bb93f7f86974382649994c8705fec8c97df620b75104c4d615b983ac37bb5b5c7e29edee7e4734737211d8ae85c8038d60ff30d6cc6225f3d5ac99070d79fd1514c4163b7b3166397e2c0e918224d27296d8fec84880e1a844360b55b01fc1b5b1169c2f539ab02f5d8a9814432ecbcaf578fb12d444ae762144f302fa70912110e647ff1372140414fc6262f671f5d3444cc661000167862f190e192a30357a7755d8e8522427ae0801a35641a6208960e58e3d266d7490c46b22b4890e53d1f39776dcc7a80a1b21dce4483a4b494dfa2e90e66fe26e17fdf5762fa5a2eaefcbbae15016ea51cd5ef9819e751586b15fb593cdf092b7d13847fd28723744572055853a55600b3dcdbc651f33998dc7b65072c6f4c69210772e43fce79131a5854d15eab49ee8dcbd0484eab7b05d8828b7d701e74b52fb0ebf95b21a9910a204313be10ccb4c069379215f39a14cd9011242e629efa1f77764eefcc9f1cff0c90ac17462a390e52f3de140
e693923448a164713aabd31d787babde59c51a0212c92e52082fd659f5515b9c5c09b566984bab76ac1673ddcb8f12efe8c2330cb2571d34d837c4e2fc845e23ceb79edb00344eae915cbe32d44195ac2681cf29f80a6cacd318d63b461b7467ef1d69ec308063a8306a6beb28ab9bcfddfeec2c7567365a03d3dd68748cc5ff1dacb8878ad74f18c61edf5d4a52e319ce463b99a33302c5e424cc816e0e49b4c1f33ed96f11e0e207d1c1b181c099462d2b84f2b0ca3f0aacbe0ce62c03e01dfd85d7f215acd52374e0e0a390c415ca099c6336dffce680b688a52a1ace99433bec18359e84c32528003068a3240ef28d55b6f86cf90abc6a6205eb80c3ebe4fb82bb4de6e9c222793789a69ea"}) (async) ioctl$BTRFS_IOC_INO_LOOKUP_USER(r0, 0xd000943e, &(0x7f00000002c0)={0x0, 0x0, "6fdc2659a13ec0f522902e8011a17ecf42109222a9dc210ffa2eb5176a136065325e1f8f1eecd84eede16798280562a09541b76c96e580fc958371f8926c91deb0ca1a4dd367b7e9fa951318aa01e20b5640aae27ba20e1bca56062b9843ea1672f9ee55fcb566f3319ffc6684a05e2cc529e1e64ce327434aee0771b8a5d7462c040334bfcd63183130d457522d249038ed6d7dfa93f1b1e6637bb61f70913793dbaf0ef439311c455499771925a6a5a6384a31e6528ef61d5a9518a63d7fb626e204b2dae1896b60a17aa570d3fe141e8ee7a373fbb7c6852f5c427c7bc0e0f80a80b5058f388c77b53aaf24fe39e877efa7206d3468fa9221ff69ece91c43", "2e1c8f7fe9679c33d9307a48b3d8855351c6c1d1fe427e217eb76967bb3b15b8bd728cbf2a0efae6b4248fd513868ca124942bcc1b59138251be9806d4054a53ea3704f50f706b73bdfb2487c746dc22018b3d160e08f44e4b010b8716a972b7255b4a88d0cbd92f43dcd329bc106fb6f7a97d255af7647b61bd15af14b387ced234ff9710161730aeaf621875c9fde2951fcd8e57583addb33392a6c869c2d2e29ba5f528933690be30726c17863b72100971963e9a4adad3a5316d34cff21600aeb45bb4fe5dd2f695e3de374f63a4db8922c039c448a668c971c2cd092d3232c7e18418744fe9d8749f1965400d4841e07de908d9fe4d9d60736c23c5204d15f2e0b2932dc85a92c4f0731dbaf141014419be4fad839f2353b532d7a0a6591b17d4292bf7c8f5ae0b5cdf2b18e665f189284d8bfaba1b23aacda193d237390d69c6ff6762fb972a87cd802852b87306d86ee4c22af8391f47e164f7a46c35c33f4c32389f36755fec7fdbca273b9560017c5983fe80f199563c385c1b30fed4e481cb133c5efda36620b544566af365b7b5fc2d948710c21b4140c93fe44ab6fcd7b689ed6b5388b5169dfc0813a54e84aac1c07e996128011a89de1175906fc56f64c7dd97b15f74035d335eb1d962e4d5f7a5a306cf8b9c6d290a3b529764a9ed3af169999ff896ee01fbba76ab2cc6b8936a7cef06094805d7ad41f01c3d7a11992e5d737667c20718135b0f52346780d6cfeec3e000cf44a799ffbea41be2ce841ef128290ff751c950658849cd17ce9bae5b530b712acb3cd835989c6a80c2b5c540356a4022e4b519827958a741d1c1928962658313a311ff9eb9e7909657584c5f5ed1699a8f943c7df8befc84104a75b7c0b42cc8543a16dfdff907edc1a6677de53515292b50bee9bc559200b09d2526beafbb3c6df4ba8ea9552a69adc56c9807450ce7a30987a661e5d6f313e5bf7d3bb755933c2c466fc03900980d7a8c9aaedd3dfc2ad7994dc86c3aecebca5b2c58f86f74231810bb2d840e617be60ebdd3b57eedb4e2f2081de8b6323844922c474dd38b3cb66abd8d5846d2bbe026a00cad367cb51d5ee8b30025ae1050347937461b98303ce806711000e9c335d52cc0ba75e3ad7bf6fca49a2e3ea30749669791f381b91ed27e97bb11af9be70ffe442b7f1144317de67eda7ce86aca8ec38ed3cab38f077a3ad08f4a87f364ef5ae3f4297486c0cbf1e0bb2ca954a706d0fe81251ecc6635e3d713d5d0ae3c423ead6fbf89a87f52fd44d0832f3a4da1229908dfe2cbbf22f622c9be5900180021bee83688bd95477302b02d994e70f065d1278ced4b9aadf4db252423264c85f5f39ba3d56e06538204213b7df290780c8b4733793587f3ca4b9d50bcb8fe06ba29fc5f8f29686e7baf923cc35da97b95ce96d0c02e7e22d653d4e48ee8c131fe80cf02e92839bf7e63dbddf0937b5fc00135af5006932fd47922c69513d82ecb2b97bb1ff6649b99e0d4f5d5e069d8dcfb4c56e30db8b6499409441b789571e3059028e283b5d94929c481c9420663fe044e7485e9f1726e02bb548e8298270139f596d1cca2628a8c04f6cb5ec352b23ae4c36e1b20c08a96fc9efa2d72992efa0e7877e81dccbd2ca42aa36b5d9795491e998bc681b4eb99af5146e5008dd78cd92ac398e4429c4eca0b901b92a2
628641fcda99bd34015a924685f29bcf4e373bc11c818818a5b2d7aa356e9b0445aa2063f81085e6337c0b0efd24590e5d41a0715fc9f370af1fde9d0159dd25c46ae538cc84e4f617d1dafce988ca4282dbda7d74cab62483bc2ac8f773a83f6db5d5633f166fa880b76374eb6b1f150d9d3f31da28a0e1f741b30eb15f6f0587c49a0bde370720c81eee3670bcef26ac917a3a0bc600c0d4314b3b7d5f0bc2b930d91b6599ed7366a270963daa69a670ac3aca29a348da256e276a65dfb7125f48558ec70db4a9d49b8cedba0034f0136cb0c870e9111c6424c43a420277d931c0ed8f3296bb9c63783e54c68b7b5bdcc1c930ddbc446733c61698bb1073c31caa568dbacb131e132b9952fff40bec8656b0b5839b65af15bd873b9a2df86bcdd05c5a8e83b66befb987a85871b0dab1b46aa90c7862822b04ff3b494ac57ba76621833d1fecea9379dd389e1959ffe3ada16ef562cb58f8c7fe744b59782f14aa68c7c80e78aa00a9e04dee44f99e330859aad2466af2ada1f3e0667ce42ca155d1b966611b6a2c94926fc6b065292f8323293546a582d05b5ab2931a3785f542623b6989e8d714a815d9243e910a428833c3449e586cd64280612ab8092aa8a55b8bd0bdd21c534734bd7d3e0d823df70374842ba84e89a89062ce23d810ec60c394d5289f5ab16d69e0b3d7745adfc2e8cf12e76f04c528fa0796ccee7a5f2a364e1ace2fb26aee1aa41b543875fc124028595d2e9daa182264b1c247bd276a1a1ed0a35bef45115b81690dc62765dc49d342ac4f749894e42b14b5094623d04ef66f8d2d4e1cdb55452049d89d078191e51c216188385d846d5430bbbc8525a97b08b397cdb28b0e8466e629c7664f6c844851271e5cd85315bb8a483f48536b52ba572c9d6eb8aff004bf8a416e41c631835ad1cb890b3293c9a4d5299314bbf0ae1a3dbd54aa58443a5e5cba60cfbfd6051fad6b933e700d4dcc7efebf17ec72d9b447d82439b66bcc49ce16c8e7d635b3719eca5d0656e61cec439447d802e2ab8b4c716c78de011ebe4a84a2679ace5ad5e9738cfca9e640bf992d8bb8dd4d69005bd5a736bf37b100735e2699f560f162a9542192588084d26698bd52a007350d0602393706dc64e5b25b2ad8338b07e2c0bf140a4c9d83e0e6f7e7536c7b69fd48b3ecd6e8aab68f10e508ed002bb3b4f5601432741156940cf4475d0e4bb02f4074b4d2b083625f1d421236da2c176d62ac8a3ac6a9b15e52f797c2a9fa08ffbc17d7e480ac876c64f9f3b3fc78ca1dda58a16ef4f93aca3b32a890c503506f4564446743d0ad22a3b9d63583b1527a723684521fa39db38d7064c89688e57ecaae15738c5f51466ab10619c2beb944a6f47a171a1de7b0766f6b9aa13d97cfdbcb5eef761c96811673176a013829510e1ccdc72a721b55433fc704a2045742adfe1ca2d5cd25d1c9dbb6be7b22ecc16d31172797e9efbb2b1b048224c61dc47ffc9c564fd6be9154a48fc1c62433de5c90abca99d5003df69ebbed6c08b4a73d712d7debff75f83bc6b9d0237f63d8020edbe6de7f77a50a70103e3b1afae2665f4e2fd78f658f0a0907888567a8dfcdeb8dd6366245aa1d45ac2d54abd0725ca8bee6bb30b67885b7d3e196eca7577354abbc0f956190b288c942a96c05f76cf634f7b41542126742b33a75e125b2f87ec2c9d4970169de28de80b08b64e94725c193d6858ac77bf4c0d14ddfd90c9c2786688948ea9a85b305726b7c15e6e215ad2a071fdd72f2e4b132cb2498689a64d791833fc16cabaa87d35ab052670e62c7250b65d0780fa30e86dba82405ae403c8db0229aa4d089f3a23155d6339a916bce926433bed1f403dbb8d837bc8edc03f23c6bc202269b7c92a472b07f50c90989cbb7934ac2980556bbc6de9d1bc96dd7632bbe0f25c67e971d8c80beaeaa459b1d8f80ea043009059e2d7da33688f1a7b05b61cc28c75ca78fbb75afe1062293b39b48f805471ddfb85c8e903270b20a843f109d6f6d0a3f5cffeaf00a602b9467c6247276bb75ca6d5c20fb578128a8080ba94f07afa0c28526106627ac50724c37f61eabf15fb9acc7f9c796d25f883c88e4dcbe13b669cb55df9623740b061511b44f93796601ea359af03e8483a12341ea18a59b7477361c94ddcddfcb1a623061642582df0b85ad2ac59d93b5e6c9a9185ee8746ba50fc026eeb3c3b4c8ed02680556d6a2b560eb997d2815a886eb7a15aa2e3d8ea4184fa81288edcc790fd3ecd8753e067ecbcdc4bf9c1d9033fce70cd689899da85a13088b9b376b43a589fd88eadc85faba0a4dedf0c5d1dd850d4bab0608b32ecbbcf2fc9fd3de407d6eedb670b88922ccc39ad1408cd0bb68b3d83e97c13589e86c03cb0260ee9e0af9374c63293d2dbfc0d8edfee6460913de6c2fe286f9b1e5673f875aa6b1ff376e9ab3590e3726292e05dd51b48386d592e21463581eb3647d3a556f54b81bb8f200699491f44
d5673ac52ebcf301b9b76a267d75cbb106ef02d06e6727b38b76554525b59d5ea585cce8c5b8fa8a0b26399408d03029096e99771ea5f268c6b54709452a845b885e9cdc315392c3e11d79ceeb85b490e3276d17c92d63fafa5c8ec088ee07059c9b0b842775f7e4fb8d03f9882fd11612e1d34d22263439c99cd601c79980f78507e18b6ccd8c51b653f2dcd11de7eb74f903ee87e5b946441b8cc1d9f09e36b06027980d9a2c51bac40711352ed4a2d957d43eb9a49169c72fbfd9ad79c17f14fc7091e94703752c9c9085d12e1c15bb93f7f86974382649994c8705fec8c97df620b75104c4d615b983ac37bb5b5c7e29edee7e4734737211d8ae85c8038d60ff30d6cc6225f3d5ac99070d79fd1514c4163b7b3166397e2c0e918224d27296d8fec84880e1a844360b55b01fc1b5b1169c2f539ab02f5d8a9814432ecbcaf578fb12d444ae762144f302fa70912110e647ff1372140414fc6262f671f5d3444cc661000167862f190e192a30357a7755d8e8522427ae0801a35641a6208960e58e3d266d7490c46b22b4890e53d1f39776dcc7a80a1b21dce4483a4b494dfa2e90e66fe26e17fdf5762fa5a2eaefcbbae15016ea51cd5ef9819e751586b15fb593cdf092b7d13847fd28723744572055853a55600b3dcdbc651f33998dc7b65072c6f4c69210772e43fce79131a5854d15eab49ee8dcbd0484eab7b05d8828b7d701e74b52fb0ebf95b21a9910a204313be10ccb4c069379215f39a14cd9011242e629efa1f77764eefcc9f1cff0c90ac17462a390e52f3de140e693923448a164713aabd31d787babde59c51a0212c92e52082fd659f5515b9c5c09b566984bab76ac1673ddcb8f12efe8c2330cb2571d34d837c4e2fc845e23ceb79edb00344eae915cbe32d44195ac2681cf29f80a6cacd318d63b461b7467ef1d69ec308063a8306a6beb28ab9bcfddfeec2c7567365a03d3dd68748cc5ff1dacb8878ad74f18c61edf5d4a52e319ce463b99a33302c5e424cc816e0e49b4c1f33ed96f11e0e207d1c1b181c099462d2b84f2b0ca3f0aacbe0ce62c03e01dfd85d7f215acd52374e0e0a390c415ca099c6336dffce680b688a52a1ace99433bec18359e84c32528003068a3240ef28d55b6f86cf90abc6a6205eb80c3ebe4fb82bb4de6e9c222793789a69ea"}) [ 2187.788186][ T7999] 8021q: adding VLAN 0 to HW filter on device bond850 10:42:03 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xea030000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:42:03 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) (async) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) r0 = accept(0xffffffffffffffff, 0x0, 0x0) (async) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) (async, rerun: 32) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) (async, rerun: 32) r1 = socket$nl_generic(0x10, 0x3, 0x10) r2 = socket$nl_generic(0x10, 0x3, 0x10) (async) ioctl$sock_SIOCGIFINDEX_80211(r1, 0x8933, &(0x7f0000000480)={'wlan1\x00', 0x0}) (async, rerun: 64) r4 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000180), 0xffffffffffffffff) (rerun: 64) sendmsg$NL80211_CMD_REMAIN_ON_CHANNEL(r2, &(0x7f0000000080)={0x0, 0x0, &(0x7f0000000100)={&(0x7f0000000040)=ANY=[@ANYBLOB=',\x00\x00\x00', @ANYRES16=r4, @ANYBLOB="01000000000000000000370f000008000300", @ANYRES32=r3, @ANYBLOB="080026006c0900000800570080"], 
0x2c}}, 0x0) sendmsg$NL80211_CMD_FRAME(r0, &(0x7f0000000080)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x20}, 0xc, &(0x7f0000000040)={&(0x7f0000000400)={0x548, r4, 0x2, 0x70bd27, 0x25dfdbfb, {{}, {@void, @void}}, [@NL80211_ATTR_FRAME={0x4f2, 0x33, @reassoc_resp={@wo_ht={{0x0, 0x0, 0x3, 0x0, 0x0, 0x1, 0x0, 0x0, 0x1}, {0x1}, @device_b, @broadcast, @initial, {0x2, 0xb90}}, 0x4008, 0x31, @default, @void, @void, [{0xdd, 0xdd, "893bddf4f39fe23047d81def36c83ea7dc86cbe2f772c49c22fe49425e1088de8158ffa6463c6cb92125eb864dabe27eb9a255a02aa68cff2421aac59af58439cc2aea96e0da07554c5e9fe7993507e908e173468d8f854a9abf6e03bb5f333a1457712a32fd8e761ed8f3fefa388090ea0ac5c8c31a41af9bb575c8c7d1280985a91a9fc9e05a54be6c61f0b0d74ee93233d805a69957df477a33d9be27e721196fcfebcd8995ba3a3d7ffee28e15deb87b04ae17427585b1add813ef79fe81ef11953cf553ee115bb11eb2b7724aeac3a67b7f922f46abe0ac104e33"}, {0xdd, 0x4f, "d14dcc51ed6efd00750ed5aa5e4911bc07848c25253496e60b0cd2b3ca5b6f13f89915809d3074f057996254ffebbc830e31ed205e01cfebcc4df82980a796fe2b5bce11f53f33339dc3e71349b025"}, {0xdd, 0x11, "8412ce67132c319517c43df8abd762d417"}, {0xdd, 0xfe, "afa2dfa9b948e8520bfdfce1580237969efc0e6637978612a367ea7db0b6cc7d85d7dd928e070b9420e7769a549f657f9c4c6cf45d883df0abe51115ab78dafef03f1305dbcec17db22b8414358758c78f8ef8ca3779c2b9d5450610d2265fef70395ee8d0a706f2a032409e575aa19e20f3c9fec89cd2c03ff3d339eee7ba8365fdaf06da6f664ddc47d441edfaa9a5123c639a4dbf118ed32d17365381f26af2fe5c3daffd4c973357290e6066389f8c8eae15b4f4052b9fcf060f7b8059e46b4ca7413d103db7a83c2278e0f0db436b7620d98ceea5e40fa1235d11db1c713c746b4c34c0c4f23a4186caad6c8ca9021593a465322f6e1c3b74e465e5"}, {0xdd, 0xbd, "57130242ab769d9a05497e2b28585843152c3b0fa845362b9539d9d54ba11ac7e6d52b566d60c5b97fedb660a728901aa8d526d5ec179473a14075419bdbd139bd71577baa331f6f658866b6d61e66fe1460bdcb76c446b67f566fab16615f1ddced97b2d1afc9a3de2779575fb6e53a0bf8cdec8c06392db58ab2c88a5730143eccbdfcc3d4525129fe9802b201fc3301b225cea1418e5661f1eee1b7db04f9d0a8c48bfd9660770661f03bfa1a494a3a798f166e2433b94dc6f9f92e"}, {0xdd, 0xe, "c7cb2d1497831b0a574b0bbe92c3"}, {0xdd, 0x7a, "f97f542c893c39984db8b5d544ca54c7273e8b4461efced3489fdda30b631d1c207c9b6728e13a0b8f531b9ada8aa97823694ec47c402b23bf00098b1a46e11b63e6b50d07e5b0939e5effc859cd489baf8e8f0f9301ba3ac626fa344d71069333ce55d615f6c4e5dabcc755e75cc4b76bbd391e9d3d9df6bfc4"}, {0xdd, 0xc6, "effe6a53891cb8a3dff6d13aa0719626ce09ddbe1906b3c8cf43b360ae71e3ad005c2b43242f39e452318d5bd72f23dd3bbc179e86d4f8212721a34fdf522e0af37d8032294539b38038623fa910dcb0f23b46068ef3901ef3bada7627753a7f6b93f19be03e4eb76ca67d1733179aea8a83a9234a361d60c566faeb1e549bcd0fc124c594cae657c30fdd9139f2791628cc1922d3efbb7965840dd93ab35f7b5606b76175fe9cd2351de60a9c20cf0efd627959d07ae7ff5c38641aefb954396718588dd75d"}, {0xdd, 0x78, "1031333a81626f8693885ca0f6f3c4b022d30b7e4fdb0bc44468743b7d35f3a3a3dd98d80217a589e571e0678e1ac94f384baf882854fee971575bd1ef698db97811db25aa370b1246126d963492553ee43f5065c8c9ee6ae33518cdc24ce9f4657c8d36f913fd74e6325799928ffe9ed11e44bb7f58b4f7"}]}}, @NL80211_ATTR_CSA_C_OFFSETS_TX={0xc, 0xcd, [0xe9, 0x9, 0x6, 0x3f]}, @chandef_params=[@NL80211_ATTR_CENTER_FREQ1={0x8, 0xa0, 0x5}, @NL80211_ATTR_WIPHY_EDMG_BW_CONFIG={0x5, 0x119, 0xa}, @NL80211_ATTR_WIPHY_FREQ_OFFSET={0x8, 0x122, 0x6f}], @NL80211_ATTR_CSA_C_OFFSETS_TX={0xe, 0xcd, [0xff01, 0x1, 0xe, 0x2, 0xc98]}, @NL80211_ATTR_DONT_WAIT_FOR_ACK={0x4}, @NL80211_ATTR_CSA_C_OFFSETS_TX={0x6, 0xcd, [0xc58]}]}, 0x548}, 0x1, 0x0, 0x0, 0x4000004}, 0x1) (async) r5 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, 
&(0x7f00000003c0)={0x0, 0x3c}}, 0x0) (async) sendmsg$nl_route(r5, 0x0, 0x0) 10:42:03 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) r0 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r0, 0x0, 0x0) 10:42:04 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) r1 = socket$inet_sctp(0x2, 0x1, 0x84) getsockopt$inet_sctp_SCTP_MAX_BURST(r1, 0x84, 0xd, &(0x7f0000000000)=@assoc_value, &(0x7f00000000c0)=0x8) mmap(&(0x7f0000ff4000/0xa000)=nil, 0xa000, 0x0, 0x4000010, r1, 0x892d0000) [ 2187.963815][ T8037] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2188.050392][ T8016] 8021q: adding VLAN 0 to HW filter on device bond1447 [ 2188.185768][ T8018] bridge1344: entered promiscuous mode [ 2188.208269][ T8018] bridge1344: entered allmulticast mode [ 2188.296298][ T8018] bond1447: (slave bridge1344): making interface the new active one [ 2188.316740][ T8018] bond1447: (slave bridge1344): Enslaving as an active interface with an up link 10:42:04 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xffffffa1}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:42:04 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000000)='blkio.throttle.io_serviced_recursive\x00', 0x275a, 0x0) readv(r0, &(0x7f0000000280)=[{&(0x7f0000000040)=""/233, 0xe9}, {&(0x7f0000000140)=""/94, 0x5e}, {&(0x7f00000001c0)=""/114, 0x72}, {&(0x7f0000000240)=""/50, 0x32}], 0x4) (async) mmap(&(0x7f0000ffa000/0x3000)=nil, 0x3000, 0xa, 0x110, r0, 0xffa75000) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0x1f87f9b6) (async) ioctl$BTRFS_IOC_INO_LOOKUP_USER(r0, 0xd000943e, &(0x7f00000002c0)={0x0, 0x0, "6fdc2659a13ec0f522902e8011a17ecf42109222a9dc210ffa2eb5176a136065325e1f8f1eecd84eede16798280562a09541b76c96e580fc958371f8926c91deb0ca1a4dd367b7e9fa951318aa01e20b5640aae27ba20e1bca56062b9843ea1672f9ee55fcb566f3319ffc6684a05e2cc529e1e64ce327434aee0771b8a5d7462c040334bfcd63183130d457522d249038ed6d7dfa93f1b1e6637bb61f70913793dbaf0ef439311c455499771925a6a5a6384a31e6528ef61d5a9518a63d7fb626e204b2dae1896b60a17aa570d3fe141e8ee7a373fbb7c6852f5c427c7bc0e0f80a80b5058f388c77b53aaf24fe39e877efa7206d3468fa9221ff69ece91c43", 
"2e1c8f7fe9679c33d9307a48b3d8855351c6c1d1fe427e217eb76967bb3b15b8bd728cbf2a0efae6b4248fd513868ca124942bcc1b59138251be9806d4054a53ea3704f50f706b73bdfb2487c746dc22018b3d160e08f44e4b010b8716a972b7255b4a88d0cbd92f43dcd329bc106fb6f7a97d255af7647b61bd15af14b387ced234ff9710161730aeaf621875c9fde2951fcd8e57583addb33392a6c869c2d2e29ba5f528933690be30726c17863b72100971963e9a4adad3a5316d34cff21600aeb45bb4fe5dd2f695e3de374f63a4db8922c039c448a668c971c2cd092d3232c7e18418744fe9d8749f1965400d4841e07de908d9fe4d9d60736c23c5204d15f2e0b2932dc85a92c4f0731dbaf141014419be4fad839f2353b532d7a0a6591b17d4292bf7c8f5ae0b5cdf2b18e665f189284d8bfaba1b23aacda193d237390d69c6ff6762fb972a87cd802852b87306d86ee4c22af8391f47e164f7a46c35c33f4c32389f36755fec7fdbca273b9560017c5983fe80f199563c385c1b30fed4e481cb133c5efda36620b544566af365b7b5fc2d948710c21b4140c93fe44ab6fcd7b689ed6b5388b5169dfc0813a54e84aac1c07e996128011a89de1175906fc56f64c7dd97b15f74035d335eb1d962e4d5f7a5a306cf8b9c6d290a3b529764a9ed3af169999ff896ee01fbba76ab2cc6b8936a7cef06094805d7ad41f01c3d7a11992e5d737667c20718135b0f52346780d6cfeec3e000cf44a799ffbea41be2ce841ef128290ff751c950658849cd17ce9bae5b530b712acb3cd835989c6a80c2b5c540356a4022e4b519827958a741d1c1928962658313a311ff9eb9e7909657584c5f5ed1699a8f943c7df8befc84104a75b7c0b42cc8543a16dfdff907edc1a6677de53515292b50bee9bc559200b09d2526beafbb3c6df4ba8ea9552a69adc56c9807450ce7a30987a661e5d6f313e5bf7d3bb755933c2c466fc03900980d7a8c9aaedd3dfc2ad7994dc86c3aecebca5b2c58f86f74231810bb2d840e617be60ebdd3b57eedb4e2f2081de8b6323844922c474dd38b3cb66abd8d5846d2bbe026a00cad367cb51d5ee8b30025ae1050347937461b98303ce806711000e9c335d52cc0ba75e3ad7bf6fca49a2e3ea30749669791f381b91ed27e97bb11af9be70ffe442b7f1144317de67eda7ce86aca8ec38ed3cab38f077a3ad08f4a87f364ef5ae3f4297486c0cbf1e0bb2ca954a706d0fe81251ecc6635e3d713d5d0ae3c423ead6fbf89a87f52fd44d0832f3a4da1229908dfe2cbbf22f622c9be5900180021bee83688bd95477302b02d994e70f065d1278ced4b9aadf4db252423264c85f5f39ba3d56e06538204213b7df290780c8b4733793587f3ca4b9d50bcb8fe06ba29fc5f8f29686e7baf923cc35da97b95ce96d0c02e7e22d653d4e48ee8c131fe80cf02e92839bf7e63dbddf0937b5fc00135af5006932fd47922c69513d82ecb2b97bb1ff6649b99e0d4f5d5e069d8dcfb4c56e30db8b6499409441b789571e3059028e283b5d94929c481c9420663fe044e7485e9f1726e02bb548e8298270139f596d1cca2628a8c04f6cb5ec352b23ae4c36e1b20c08a96fc9efa2d72992efa0e7877e81dccbd2ca42aa36b5d9795491e998bc681b4eb99af5146e5008dd78cd92ac398e4429c4eca0b901b92a2628641fcda99bd34015a924685f29bcf4e373bc11c818818a5b2d7aa356e9b0445aa2063f81085e6337c0b0efd24590e5d41a0715fc9f370af1fde9d0159dd25c46ae538cc84e4f617d1dafce988ca4282dbda7d74cab62483bc2ac8f773a83f6db5d5633f166fa880b76374eb6b1f150d9d3f31da28a0e1f741b30eb15f6f0587c49a0bde370720c81eee3670bcef26ac917a3a0bc600c0d4314b3b7d5f0bc2b930d91b6599ed7366a270963daa69a670ac3aca29a348da256e276a65dfb7125f48558ec70db4a9d49b8cedba0034f0136cb0c870e9111c6424c43a420277d931c0ed8f3296bb9c63783e54c68b7b5bdcc1c930ddbc446733c61698bb1073c31caa568dbacb131e132b9952fff40bec8656b0b5839b65af15bd873b9a2df86bcdd05c5a8e83b66befb987a85871b0dab1b46aa90c7862822b04ff3b494ac57ba76621833d1fecea9379dd389e1959ffe3ada16ef562cb58f8c7fe744b59782f14aa68c7c80e78aa00a9e04dee44f99e330859aad2466af2ada1f3e0667ce42ca155d1b966611b6a2c94926fc6b065292f8323293546a582d05b5ab2931a3785f542623b6989e8d714a815d9243e910a428833c3449e586cd64280612ab8092aa8a55b8bd0bdd21c534734bd7d3e0d823df70374842ba84e89a89062ce23d810ec60c394d5289f5ab16d69e0b3d7745adfc2e8cf12e76f04c528fa0796ccee7a5f2a364e1ace2fb26aee1aa41b543875fc124028595d2e9daa182264b1c247bd276a1a1ed0a35bef45115b81690dc62765dc49d342ac4f749894e42b
14b5094623d04ef66f8d2d4e1cdb55452049d89d078191e51c216188385d846d5430bbbc8525a97b08b397cdb28b0e8466e629c7664f6c844851271e5cd85315bb8a483f48536b52ba572c9d6eb8aff004bf8a416e41c631835ad1cb890b3293c9a4d5299314bbf0ae1a3dbd54aa58443a5e5cba60cfbfd6051fad6b933e700d4dcc7efebf17ec72d9b447d82439b66bcc49ce16c8e7d635b3719eca5d0656e61cec439447d802e2ab8b4c716c78de011ebe4a84a2679ace5ad5e9738cfca9e640bf992d8bb8dd4d69005bd5a736bf37b100735e2699f560f162a9542192588084d26698bd52a007350d0602393706dc64e5b25b2ad8338b07e2c0bf140a4c9d83e0e6f7e7536c7b69fd48b3ecd6e8aab68f10e508ed002bb3b4f5601432741156940cf4475d0e4bb02f4074b4d2b083625f1d421236da2c176d62ac8a3ac6a9b15e52f797c2a9fa08ffbc17d7e480ac876c64f9f3b3fc78ca1dda58a16ef4f93aca3b32a890c503506f4564446743d0ad22a3b9d63583b1527a723684521fa39db38d7064c89688e57ecaae15738c5f51466ab10619c2beb944a6f47a171a1de7b0766f6b9aa13d97cfdbcb5eef761c96811673176a013829510e1ccdc72a721b55433fc704a2045742adfe1ca2d5cd25d1c9dbb6be7b22ecc16d31172797e9efbb2b1b048224c61dc47ffc9c564fd6be9154a48fc1c62433de5c90abca99d5003df69ebbed6c08b4a73d712d7debff75f83bc6b9d0237f63d8020edbe6de7f77a50a70103e3b1afae2665f4e2fd78f658f0a0907888567a8dfcdeb8dd6366245aa1d45ac2d54abd0725ca8bee6bb30b67885b7d3e196eca7577354abbc0f956190b288c942a96c05f76cf634f7b41542126742b33a75e125b2f87ec2c9d4970169de28de80b08b64e94725c193d6858ac77bf4c0d14ddfd90c9c2786688948ea9a85b305726b7c15e6e215ad2a071fdd72f2e4b132cb2498689a64d791833fc16cabaa87d35ab052670e62c7250b65d0780fa30e86dba82405ae403c8db0229aa4d089f3a23155d6339a916bce926433bed1f403dbb8d837bc8edc03f23c6bc202269b7c92a472b07f50c90989cbb7934ac2980556bbc6de9d1bc96dd7632bbe0f25c67e971d8c80beaeaa459b1d8f80ea043009059e2d7da33688f1a7b05b61cc28c75ca78fbb75afe1062293b39b48f805471ddfb85c8e903270b20a843f109d6f6d0a3f5cffeaf00a602b9467c6247276bb75ca6d5c20fb578128a8080ba94f07afa0c28526106627ac50724c37f61eabf15fb9acc7f9c796d25f883c88e4dcbe13b669cb55df9623740b061511b44f93796601ea359af03e8483a12341ea18a59b7477361c94ddcddfcb1a623061642582df0b85ad2ac59d93b5e6c9a9185ee8746ba50fc026eeb3c3b4c8ed02680556d6a2b560eb997d2815a886eb7a15aa2e3d8ea4184fa81288edcc790fd3ecd8753e067ecbcdc4bf9c1d9033fce70cd689899da85a13088b9b376b43a589fd88eadc85faba0a4dedf0c5d1dd850d4bab0608b32ecbbcf2fc9fd3de407d6eedb670b88922ccc39ad1408cd0bb68b3d83e97c13589e86c03cb0260ee9e0af9374c63293d2dbfc0d8edfee6460913de6c2fe286f9b1e5673f875aa6b1ff376e9ab3590e3726292e05dd51b48386d592e21463581eb3647d3a556f54b81bb8f200699491f44d5673ac52ebcf301b9b76a267d75cbb106ef02d06e6727b38b76554525b59d5ea585cce8c5b8fa8a0b26399408d03029096e99771ea5f268c6b54709452a845b885e9cdc315392c3e11d79ceeb85b490e3276d17c92d63fafa5c8ec088ee07059c9b0b842775f7e4fb8d03f9882fd11612e1d34d22263439c99cd601c79980f78507e18b6ccd8c51b653f2dcd11de7eb74f903ee87e5b946441b8cc1d9f09e36b06027980d9a2c51bac40711352ed4a2d957d43eb9a49169c72fbfd9ad79c17f14fc7091e94703752c9c9085d12e1c15bb93f7f86974382649994c8705fec8c97df620b75104c4d615b983ac37bb5b5c7e29edee7e4734737211d8ae85c8038d60ff30d6cc6225f3d5ac99070d79fd1514c4163b7b3166397e2c0e918224d27296d8fec84880e1a844360b55b01fc1b5b1169c2f539ab02f5d8a9814432ecbcaf578fb12d444ae762144f302fa70912110e647ff1372140414fc6262f671f5d3444cc661000167862f190e192a30357a7755d8e8522427ae0801a35641a6208960e58e3d266d7490c46b22b4890e53d1f39776dcc7a80a1b21dce4483a4b494dfa2e90e66fe26e17fdf5762fa5a2eaefcbbae15016ea51cd5ef9819e751586b15fb593cdf092b7d13847fd28723744572055853a55600b3dcdbc651f33998dc7b65072c6f4c69210772e43fce79131a5854d15eab49ee8dcbd0484eab7b05d8828b7d701e74b52fb0ebf95b21a9910a204313be10ccb4c069379215f39a14cd9011242e629efa1f77764eefcc9f1cff0c90ac17462a390e52f3de140
e693923448a164713aabd31d787babde59c51a0212c92e52082fd659f5515b9c5c09b566984bab76ac1673ddcb8f12efe8c2330cb2571d34d837c4e2fc845e23ceb79edb00344eae915cbe32d44195ac2681cf29f80a6cacd318d63b461b7467ef1d69ec308063a8306a6beb28ab9bcfddfeec2c7567365a03d3dd68748cc5ff1dacb8878ad74f18c61edf5d4a52e319ce463b99a33302c5e424cc816e0e49b4c1f33ed96f11e0e207d1c1b181c099462d2b84f2b0ca3f0aacbe0ce62c03e01dfd85d7f215acd52374e0e0a390c415ca099c6336dffce680b688a52a1ace99433bec18359e84c32528003068a3240ef28d55b6f86cf90abc6a6205eb80c3ebe4fb82bb4de6e9c222793789a69ea"}) 10:42:04 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) (async) mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) socket$netlink(0x10, 0x3, 0x0) (async) r0 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r0, 0x0, 0x0) 10:42:04 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) r1 = socket$inet_sctp(0x2, 0x1, 0x84) getsockopt$inet_sctp_SCTP_MAX_BURST(r1, 0x84, 0xd, &(0x7f0000000000)=@assoc_value, &(0x7f00000000c0)=0x8) (async) mmap(&(0x7f0000ff4000/0xa000)=nil, 0xa000, 0x0, 0x4000010, r1, 0x892d0000) [ 2188.402833][ T8035] 8021q: adding VLAN 0 to HW filter on device bond1410 10:42:04 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) r1 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r2 = openat$cgroup_ro(r1, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r2, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) ioctl$FS_IOC_RESVSP(r2, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r2, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) write$tun(r2, 0x0, 0x0) sendmsg$RDMA_NLDEV_CMD_NEWLINK(r2, &(0x7f00000001c0)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x200}, 0xc, 
&(0x7f0000000180)={&(0x7f00000003c0)=ANY=[@ANYBLOB="d8000000031410002bbd7000fcdbdf250900020073797a320000000008004100727865001400330076657468305f746f5f626f6e640000000900020073797a30000000000800410072786500140033006970766c616e310000000000000000000900020073797a3000000000080041007369770014003300766c616e3000000000000000000000000900020073797a320000000008004100736977001400330073797a5f74756e0000000000000000000900020073797a310000000008004100727865001400330076657468305f746f5f62726964676500c481e8bcefa3e9b3b3aff9ccbd3d04de0cfc5f0ba546d962534f67b4c8843b23f9503eca9f9023fb28ba683bfb392e732bc7d306a31c03ee16777f83faee42c9b3348b4fa2570a2b60f4a1165d94abdae5188f9b953cef18f5a8bd2b1259ab08f370c159f55d9bd974e354"], 0xd8}, 0x1, 0x0, 0x0, 0x20008849}, 0x40000) [ 2188.663654][ T8043] bond1410: (slave bridge1274): making interface the new active one [ 2188.687705][ T8043] bond1410: (slave bridge1274): Enslaving as an active interface with an up link [ 2188.730283][ T8041] 8021q: adding VLAN 0 to HW filter on device bond851 [ 2188.785039][ T8049] bond851: (slave bridge1006): making interface the new active one [ 2188.803617][ T8049] bond851: (slave bridge1006): Enslaving as an active interface with an up link 10:42:04 executing program 5: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) writev(r0, &(0x7f0000000340)=[{&(0x7f0000000280)="043caa07f3a3f76d858ace87d769ded7550278376e8c4f6550d9108cbf5189deea33e10b8a7224c6107d53d01f904f58c8e83e60c4192c5a7fbd1a5f9c9fb63ca8dddee8d195355273926702d073752d80d8545e667b5c485a", 0x59}, {&(0x7f0000000100)="e48c29435b087869336f3e74363df6ea882508c8aa487ff564e800ccd724f50e3896c5d551b1a2ec6bb29187299fe2e06c11dfcb2075f9a839b34a4a6b4c2bec", 0x40}, {&(0x7f0000000400)="a318ece45becd6e47ff8375b816640bce6a9ff4d8e83ab791404d351c3ba957f2f3a0220176c17e899248cf03c8a908c4ade8de15d81e79b803df97f6fe5e817c2efde3befe034a7bc6d52a0c1caf88dc7d2077e40879876ce4aa8d6273615939a97526b8cb409d0c58f8075db16b3aed80bd44efb660be17d9db07a801a43b59a034e9998a5d60b072ab0fe5dd97a86e25568ab6d9d74c7af31171f38eaff0cb0fc24300b48212ec6608ef820fc964a28db4cd8d0526d550efac04bc60f4e4cd3c224cbe0ba45f28e0566930afbca59424e39854793c6e301fdc9cedeb00500db4af5f6c2004568aa4682fc656b1943bb63", 0xf2}, {&(0x7f0000000500)="a1a8c7128cf53b860b5aaa1ccee4ffe846420e0192cfed6fed4eeddf537179b0a83627c3868f3b78b60a61263a90957f69337b2ab0a123218ed0318185135c095b602df299adfc458eef4947ab34782ecdbae8436486efca9d9076f2316af18200a3014efba92706cc1748f7c5925bdaa3aa4516b6f1a6f22f4f1c23d2554620b90ad856298d3087e4519d78d14dd2d1dbb2401cfd330b19ac2fc26b2b4837023a585b846cdc0e007773719a6ad0cf3acb3cde81220e1909741522d75d5c49222f1d0ef6d8bd1b58070df04e658553cb734adbdb98c8482330ed25a1d53b5b3a6c09aa436715f3535c84a943", 0xec}], 0x4) accept(0xffffffffffffffff, 0x0, &(0x7f0000000080)) socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x0, 0x803, 0x0) syz_genetlink_get_family_id$mptcp(&(0x7f0000000000), r1) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r3, &(0x7f0000000140)={0x0, 0x0, 
&(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r5, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0xf0ffffff}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x3c}}, 0x0) 10:42:04 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) r1 = socket$inet_sctp(0x2, 0x1, 0x84) getsockopt$inet_sctp_SCTP_MAX_BURST(r1, 0x84, 0xd, &(0x7f0000000000)=@assoc_value, &(0x7f00000000c0)=0x8) (async) mmap(&(0x7f0000ff4000/0xa000)=nil, 0xa000, 0x0, 0x4000010, r1, 0x892d0000) 10:42:04 executing program 4: openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) r1 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r2 = openat$cgroup_ro(r1, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r2, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) ioctl$FS_IOC_RESVSP(r2, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r2, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) (async) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r2, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) write$tun(r2, 0x0, 0x0) (async) write$tun(r2, 0x0, 0x0) sendmsg$RDMA_NLDEV_CMD_NEWLINK(r2, &(0x7f00000001c0)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x200}, 0xc, 
&(0x7f0000000180)={&(0x7f00000003c0)=ANY=[@ANYBLOB="d8000000031410002bbd7000fcdbdf250900020073797a320000000008004100727865001400330076657468305f746f5f626f6e640000000900020073797a30000000000800410072786500140033006970766c616e310000000000000000000900020073797a3000000000080041007369770014003300766c616e3000000000000000000000000900020073797a320000000008004100736977001400330073797a5f74756e0000000000000000000900020073797a310000000008004100727865001400330076657468305f746f5f62726964676500c481e8bcefa3e9b3b3aff9ccbd3d04de0cfc5f0ba546d962534f67b4c8843b23f9503eca9f9023fb28ba683bfb392e732bc7d306a31c03ee16777f83faee42c9b3348b4fa2570a2b60f4a1165d94abdae5188f9b953cef18f5a8bd2b1259ab08f370c159f55d9bd974e354"], 0xd8}, 0x1, 0x0, 0x0, 0x20008849}, 0x40000) 10:42:04 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) (async) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) (async) r0 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) (async) sendmsg$nl_route(r0, 0x0, 0x0) 10:42:04 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xf0ffffff}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2188.849834][ T8061] 8021q: adding VLAN 0 to HW filter on device bond1448 [ 2188.947174][ T8071] bridge1345: entered promiscuous mode [ 2188.953208][ T8071] bridge1345: entered allmulticast mode 10:42:05 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) r1 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r2 = openat$cgroup_ro(r1, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r2, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) ioctl$FS_IOC_RESVSP(r2, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) (async) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r2, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) (async) write$tun(r2, 0x0, 0x0) (async) sendmsg$RDMA_NLDEV_CMD_NEWLINK(r2, 
&(0x7f00000001c0)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x200}, 0xc, &(0x7f0000000180)={&(0x7f00000003c0)=ANY=[@ANYBLOB="d8000000031410002bbd7000fcdbdf250900020073797a320000000008004100727865001400330076657468305f746f5f626f6e640000000900020073797a30000000000800410072786500140033006970766c616e310000000000000000000900020073797a3000000000080041007369770014003300766c616e3000000000000000000000000900020073797a320000000008004100736977001400330073797a5f74756e0000000000000000000900020073797a310000000008004100727865001400330076657468305f746f5f62726964676500c481e8bcefa3e9b3b3aff9ccbd3d04de0cfc5f0ba546d962534f67b4c8843b23f9503eca9f9023fb28ba683bfb392e732bc7d306a31c03ee16777f83faee42c9b3348b4fa2570a2b60f4a1165d94abdae5188f9b953cef18f5a8bd2b1259ab08f370c159f55d9bd974e354"], 0xd8}, 0x1, 0x0, 0x0, 0x20008849}, 0x40000) 10:42:05 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f0000000080)=ANY=[@ANYBLOB="2321202e2f66696c653020206d656d6f72792e6576656e747300206d656d6f72792e6576656e7473000a2fb9441087019256e69e8d46866a9c7402157e83bdf8b1f5417736ae5a9826786996cc9faf4d59db04d772735127d1a6d19063816b3f21da3be8df3b0b1a5696e554124ddc91bc28dca2a6cac9e74f72dcee5b83947081f1ba41de6bd7"], 0x87) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) r1 = socket$inet6(0xa, 0x80000, 0x5) write$binfmt_script(r1, &(0x7f0000000140)={'#! ', './file0', [{0x20, '\xa0#'}, {0x20, '^{'}, {0x20, 'memory.events\x00'}, {0x20, '\x11'}, {0x20, '6%['}], 0xa, "3cbf5fc59e5574e723bb4f7964c84491ec70942cee541538843b71ab181035d9f05c107825285f91d91cb35298df4f7257afb9bc620a0a67d4f332dd017e285096f3a957a5aa267046dc07c74a7331fe776cd4a006d154bf284a1de61d62479531f5318219aabc429dec76acde6337ec06aba2c5c48ddef448d01e521c1e770f9cfbd911d4e796a1e91ad79eda"}, 0xb3) [ 2189.381186][ T8092] validate_nla: 4 callbacks suppressed [ 2189.381208][ T8092] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
10:42:05 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xffffffc3}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:42:05 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) r1 = syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r2 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r2, 0x0, 0x0) sendmsg$NL80211_CMD_DEL_INTERFACE(r0, &(0x7f00000000c0)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x200}, 0xc, &(0x7f0000000080)={&(0x7f0000000040)={0x20, r1, 0x200, 0x70bd27, 0x25dfdbff, {{}, {@void, @val={0xc, 0x99, {0x2, 0x4b}}}}, ["", "", "", "", "", "", "", ""]}, 0x20}, 0x1, 0x0, 0x0, 0x20000010}, 0x20000005) 10:42:05 executing program 4: getsockopt$inet_sctp6_SCTP_PEER_ADDR_PARAMS(0xffffffffffffffff, 0x84, 0x9, &(0x7f0000000080)={0x0, @in6={{0xa, 0x4e20, 0x0, @initdev={0xfe, 0x88, '\x00', 0x0, 0x0}, 0x7}}, 0x2400, 0x800, 0xfffff001, 0x7, 0x14b, 0x3, 0x81}, &(0x7f0000000140)=0x9c) openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000040)='cgroup.kill\x00', 0x275a, 0x0) socketpair$tipc(0x1e, 0x2, 0x0, &(0x7f0000000000)) r0 = syz_init_net_socket$bt_l2cap(0x1f, 0x1, 0x0) r1 = socket$inet6_sctp(0xa, 0x5, 0x84) setsockopt$inet_sctp6_SCTP_FRAGMENT_INTERLEAVE(r1, 0x84, 0x12, &(0x7f0000000180)=0x10000, 0x4) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbfe) 10:42:05 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f0000000080)=ANY=[@ANYBLOB="2321202e2f66696c653020206d656d6f72792e6576656e747300206d656d6f72792e6576656e7473000a2fb9441087019256e69e8d46866a9c7402157e83bdf8b1f5417736ae5a9826786996cc9faf4d59db04d772735127d1a6d19063816b3f21da3be8df3b0b1a5696e554124ddc91bc28dca2a6cac9e74f72dcee5b83947081f1ba41de6bd7"], 0x87) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) r1 = socket$inet6(0xa, 0x80000, 0x5) write$binfmt_script(r1, &(0x7f0000000140)={'#! 
', './file0', [{0x20, '\xa0#'}, {0x20, '^{'}, {0x20, 'memory.events\x00'}, {0x20, '\x11'}, {0x20, '6%['}], 0xa, "3cbf5fc59e5574e723bb4f7964c84491ec70942cee541538843b71ab181035d9f05c107825285f91d91cb35298df4f7257afb9bc620a0a67d4f332dd017e285096f3a957a5aa267046dc07c74a7331fe776cd4a006d154bf284a1de61d62479531f5318219aabc429dec76acde6337ec06aba2c5c48ddef448d01e521c1e770f9cfbd911d4e796a1e91ad79eda"}, 0xb3) 10:42:05 executing program 4: getsockopt$inet_sctp6_SCTP_PEER_ADDR_PARAMS(0xffffffffffffffff, 0x84, 0x9, &(0x7f0000000080)={0x0, @in6={{0xa, 0x4e20, 0x0, @initdev={0xfe, 0x88, '\x00', 0x0, 0x0}, 0x7}}, 0x2400, 0x800, 0xfffff001, 0x7, 0x14b, 0x3, 0x81}, &(0x7f0000000140)=0x9c) openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000040)='cgroup.kill\x00', 0x275a, 0x0) (async) socketpair$tipc(0x1e, 0x2, 0x0, &(0x7f0000000000)) (async) r0 = syz_init_net_socket$bt_l2cap(0x1f, 0x1, 0x0) r1 = socket$inet6_sctp(0xa, 0x5, 0x84) setsockopt$inet_sctp6_SCTP_FRAGMENT_INTERLEAVE(r1, 0x84, 0x12, &(0x7f0000000180)=0x10000, 0x4) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbfe) [ 2189.628735][ T8092] 8021q: adding VLAN 0 to HW filter on device bond1411 [ 2189.771471][ T8096] bridge1275: entered promiscuous mode [ 2189.779570][ T8096] bridge1275: entered allmulticast mode [ 2189.927187][ T8093] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2189.943747][ T8093] workqueue: Failed to create a rescuer kthread for wq "bond852": -EINTR [ 2190.060644][ T8124] netlink: 'syz-executor.3': attribute type 1 has an invalid length. 10:42:06 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) (async) r0 = openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) (async) r1 = syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r2 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r2, 0x0, 0x0) (async) sendmsg$nl_route(r2, 0x0, 0x0) sendmsg$NL80211_CMD_DEL_INTERFACE(r0, &(0x7f00000000c0)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x200}, 0xc, &(0x7f0000000080)={&(0x7f0000000040)={0x20, r1, 0x200, 0x70bd27, 0x25dfdbff, {{}, {@void, @val={0xc, 0x99, {0x2, 0x4b}}}}, ["", "", "", "", "", "", "", ""]}, 0x20}, 0x1, 0x0, 0x0, 0x20000010}, 0x20000005) 10:42:06 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f0000000080)=ANY=[@ANYBLOB="2321202e2f66696c653020206d656d6f72792e6576656e747300206d656d6f72792e6576656e7473000a2fb9441087019256e69e8d46866a9c7402157e83bdf8b1f5417736ae5a9826786996cc9faf4d59db04d772735127d1a6d19063816b3f21da3be8df3b0b1a5696e554124ddc91bc28dca2a6cac9e74f72dcee5b83947081f1ba41de6bd7"], 0x87) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) r1 = socket$inet6(0xa, 0x80000, 0x5) write$binfmt_script(r1, &(0x7f0000000140)={'#! 
', './file0', [{0x20, '\xa0#'}, {0x20, '^{'}, {0x20, 'memory.events\x00'}, {0x20, '\x11'}, {0x20, '6%['}], 0xa, "3cbf5fc59e5574e723bb4f7964c84491ec70942cee541538843b71ab181035d9f05c107825285f91d91cb35298df4f7257afb9bc620a0a67d4f332dd017e285096f3a957a5aa267046dc07c74a7331fe776cd4a006d154bf284a1de61d62479531f5318219aabc429dec76acde6337ec06aba2c5c48ddef448d01e521c1e770f9cfbd911d4e796a1e91ad79eda"}, 0xb3) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) write$binfmt_script(r0, &(0x7f0000000080)=ANY=[@ANYBLOB="2321202e2f66696c653020206d656d6f72792e6576656e747300206d656d6f72792e6576656e7473000a2fb9441087019256e69e8d46866a9c7402157e83bdf8b1f5417736ae5a9826786996cc9faf4d59db04d772735127d1a6d19063816b3f21da3be8df3b0b1a5696e554124ddc91bc28dca2a6cac9e74f72dcee5b83947081f1ba41de6bd7"], 0x87) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) socket$inet6(0xa, 0x80000, 0x5) (async) write$binfmt_script(r1, &(0x7f0000000140)={'#! ', './file0', [{0x20, '\xa0#'}, {0x20, '^{'}, {0x20, 'memory.events\x00'}, {0x20, '\x11'}, {0x20, '6%['}], 0xa, "3cbf5fc59e5574e723bb4f7964c84491ec70942cee541538843b71ab181035d9f05c107825285f91d91cb35298df4f7257afb9bc620a0a67d4f332dd017e285096f3a957a5aa267046dc07c74a7331fe776cd4a006d154bf284a1de61d62479531f5318219aabc429dec76acde6337ec06aba2c5c48ddef448d01e521c1e770f9cfbd911d4e796a1e91ad79eda"}, 0xb3) (async) 10:42:06 executing program 4: getsockopt$inet_sctp6_SCTP_PEER_ADDR_PARAMS(0xffffffffffffffff, 0x84, 0x9, &(0x7f0000000080)={0x0, @in6={{0xa, 0x4e20, 0x0, @initdev={0xfe, 0x88, '\x00', 0x0, 0x0}, 0x7}}, 0x2400, 0x800, 0xfffff001, 0x7, 0x14b, 0x3, 0x81}, &(0x7f0000000140)=0x9c) (async) openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000040)='cgroup.kill\x00', 0x275a, 0x0) (async) socketpair$tipc(0x1e, 0x2, 0x0, &(0x7f0000000000)) (async) r0 = syz_init_net_socket$bt_l2cap(0x1f, 0x1, 0x0) (async) r1 = socket$inet6_sctp(0xa, 0x5, 0x84) setsockopt$inet_sctp6_SCTP_FRAGMENT_INTERLEAVE(r1, 0x84, 0x12, &(0x7f0000000180)=0x10000, 0x4) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbfe) 10:42:06 executing program 5: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) writev(r0, &(0x7f0000000340)=[{&(0x7f0000000280)="043caa07f3a3f76d858ace87d769ded7550278376e8c4f6550d9108cbf5189deea33e10b8a7224c6107d53d01f904f58c8e83e60c4192c5a7fbd1a5f9c9fb63ca8dddee8d195355273926702d073752d80d8545e667b5c485a", 0x59}, {&(0x7f0000000100)="e48c29435b087869336f3e74363df6ea882508c8aa487ff564e800ccd724f50e3896c5d551b1a2ec6bb29187299fe2e06c11dfcb2075f9a839b34a4a6b4c2bec", 0x40}, {&(0x7f0000000400)="a318ece45becd6e47ff8375b816640bce6a9ff4d8e83ab791404d351c3ba957f2f3a0220176c17e899248cf03c8a908c4ade8de15d81e79b803df97f6fe5e817c2efde3befe034a7bc6d52a0c1caf88dc7d2077e40879876ce4aa8d6273615939a97526b8cb409d0c58f8075db16b3aed80bd44efb660be17d9db07a801a43b59a034e9998a5d60b072ab0fe5dd97a86e25568ab6d9d74c7af31171f38eaff0cb0fc24300b48212ec6608ef820fc964a28db4cd8d0526d550efac04bc60f4e4cd3c224cbe0ba45f28e0566930afbca59424e39854793c6e301fdc9cedeb00500db4af5f6c2004568aa4682fc656b1943bb63", 0xf2}, 
{&(0x7f0000000500)="a1a8c7128cf53b860b5aaa1ccee4ffe846420e0192cfed6fed4eeddf537179b0a83627c3868f3b78b60a61263a90957f69337b2ab0a123218ed0318185135c095b602df299adfc458eef4947ab34782ecdbae8436486efca9d9076f2316af18200a3014efba92706cc1748f7c5925bdaa3aa4516b6f1a6f22f4f1c23d2554620b90ad856298d3087e4519d78d14dd2d1dbb2401cfd330b19ac2fc26b2b4837023a585b846cdc0e007773719a6ad0cf3acb3cde81220e1909741522d75d5c49222f1d0ef6d8bd1b58070df04e658553cb734adbdb98c8482330ed25a1d53b5b3a6c09aa436715f3535c84a943", 0xec}], 0x4) accept(0xffffffffffffffff, 0x0, &(0x7f0000000080)) socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x0, 0x803, 0x0) syz_genetlink_get_family_id$mptcp(&(0x7f0000000000), r1) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r3, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r5, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0xf2020000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x3c}}, 0x0) [ 2190.107013][ T8124] 8021q: adding VLAN 0 to HW filter on device bond1449 [ 2190.136481][ T8126] bridge1345: entered promiscuous mode [ 2190.150361][ T8126] bridge1345: entered allmulticast mode [ 2190.200668][ T8139] EXT4-fs warning: 7 callbacks suppressed [ 2190.200686][ T8139] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further 10:42:06 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xf2020000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:42:06 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r0, 0x0, 0x8000000000004) setsockopt$inet_sctp_SCTP_RESET_STREAMS(r0, 0x84, 0x77, &(0x7f0000000000)={0x0, 0x6, 0x2, [0x2, 0x5]}, 0xc) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r1, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 10:42:06 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r0, 0x0, 0x8000000000004) setsockopt$CAN_RAW_FILTER(r0, 0x65, 0x1, 
&(0x7f0000000080)=[{{0x2, 0x0, 0x1, 0x1}, {0x1, 0x1, 0x1}}, {{0x4, 0x1, 0x1, 0x1}, {0x4, 0x1, 0x0, 0x1}}, {{0x1, 0x0, 0x1}, {0x4, 0x1, 0x1}}, {{0x3, 0x1, 0x1, 0x1}, {0x1}}, {{0x0, 0x0, 0x1, 0x1}, {0x2, 0x1, 0x1}}, {{0x0, 0x0, 0x1, 0x1}, {0x3}}, {{0x3, 0x1, 0x1, 0x1}, {0x1, 0x1}}, {{0x2, 0x0, 0x1, 0x1}, {0x1, 0x1, 0x1, 0x1}}, {{0x3, 0x0, 0x1, 0x1}, {0x3, 0x1, 0x1}}], 0x48) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r1, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 10:42:06 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xffffffe4}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:42:06 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) r1 = syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r2 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r2, 0x0, 0x0) sendmsg$NL80211_CMD_DEL_INTERFACE(r0, &(0x7f00000000c0)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x200}, 0xc, &(0x7f0000000080)={&(0x7f0000000040)={0x20, r1, 0x200, 0x70bd27, 0x25dfdbff, {{}, {@void, @val={0xc, 0x99, {0x2, 0x4b}}}}, ["", "", "", "", "", "", "", ""]}, 0x20}, 0x1, 0x0, 0x0, 0x20000010}, 0x20000005) mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) (async) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) (async) accept(0xffffffffffffffff, 0x0, 0x0) (async) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) (async) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) (async) socket$netlink(0x10, 0x3, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) (async) sendmsg$nl_route(r2, 0x0, 0x0) (async) sendmsg$NL80211_CMD_DEL_INTERFACE(r0, &(0x7f00000000c0)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x200}, 0xc, &(0x7f0000000080)={&(0x7f0000000040)={0x20, r1, 0x200, 0x70bd27, 0x25dfdbff, {{}, {@void, @val={0xc, 0x99, {0x2, 0x4b}}}}, ["", "", "", "", "", "", "", ""]}, 0x20}, 0x1, 0x0, 0x0, 0x20000010}, 0x20000005) (async) [ 2190.611721][ T8162] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further 10:42:06 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r0, 0x0, 0x8000000000004) (async) setsockopt$CAN_RAW_FILTER(r0, 0x65, 0x1, &(0x7f0000000080)=[{{0x2, 0x0, 0x1, 0x1}, {0x1, 0x1, 0x1}}, {{0x4, 0x1, 0x1, 0x1}, {0x4, 0x1, 0x0, 0x1}}, {{0x1, 0x0, 0x1}, 
{0x4, 0x1, 0x1}}, {{0x3, 0x1, 0x1, 0x1}, {0x1}}, {{0x0, 0x0, 0x1, 0x1}, {0x2, 0x1, 0x1}}, {{0x0, 0x0, 0x1, 0x1}, {0x3}}, {{0x3, 0x1, 0x1, 0x1}, {0x1, 0x1}}, {{0x2, 0x0, 0x1, 0x1}, {0x1, 0x1, 0x1, 0x1}}, {{0x3, 0x0, 0x1, 0x1}, {0x3, 0x1, 0x1}}], 0x48) (async) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r1, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2190.667872][ T8155] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2190.771079][ T8155] 8021q: adding VLAN 0 to HW filter on device bond852 10:42:06 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r0, 0x0, 0x8000000000004) (async, rerun: 64) setsockopt$CAN_RAW_FILTER(r0, 0x65, 0x1, &(0x7f0000000080)=[{{0x2, 0x0, 0x1, 0x1}, {0x1, 0x1, 0x1}}, {{0x4, 0x1, 0x1, 0x1}, {0x4, 0x1, 0x0, 0x1}}, {{0x1, 0x0, 0x1}, {0x4, 0x1, 0x1}}, {{0x3, 0x1, 0x1, 0x1}, {0x1}}, {{0x0, 0x0, 0x1, 0x1}, {0x2, 0x1, 0x1}}, {{0x0, 0x0, 0x1, 0x1}, {0x3}}, {{0x3, 0x1, 0x1, 0x1}, {0x1, 0x1}}, {{0x2, 0x0, 0x1, 0x1}, {0x1, 0x1, 0x1, 0x1}}, {{0x3, 0x0, 0x1, 0x1}, {0x3, 0x1, 0x1}}], 0x48) (async, rerun: 64) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r1, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 10:42:06 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r0, 0x0, 0x8000000000004) (async) setsockopt$inet_sctp_SCTP_RESET_STREAMS(r0, 0x84, 0x77, &(0x7f0000000000)={0x0, 0x6, 0x2, [0x2, 0x5]}, 0xc) (async, rerun: 32) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (rerun: 32) ioctl$EXT4_IOC_GROUP_EXTEND(r1, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2190.855860][ T8175] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2190.876614][ T8143] bond852: (slave bridge1007): making interface the new active one [ 2190.955502][ T8143] bond852: (slave bridge1007): Enslaving as an active interface with an up link 10:42:07 executing program 5: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) writev(r0, &(0x7f0000000340)=[{&(0x7f0000000280)="043caa07f3a3f76d858ace87d769ded7550278376e8c4f6550d9108cbf5189deea33e10b8a7224c6107d53d01f904f58c8e83e60c4192c5a7fbd1a5f9c9fb63ca8dddee8d195355273926702d073752d80d8545e667b5c485a", 0x59}, {&(0x7f0000000100)="e48c29435b087869336f3e74363df6ea882508c8aa487ff564e800ccd724f50e3896c5d551b1a2ec6bb29187299fe2e06c11dfcb2075f9a839b34a4a6b4c2bec", 0x40}, {&(0x7f0000000400)="a318ece45becd6e47ff8375b816640bce6a9ff4d8e83ab791404d351c3ba957f2f3a0220176c17e899248cf03c8a908c4ade8de15d81e79b803df97f6fe5e817c2efde3befe034a7bc6d52a0c1caf88dc7d2077e40879876ce4aa8d6273615939a97526b8cb409d0c58f8075db16b3aed80bd44efb660be17d9db07a801a43b59a034e9998a5d60b072ab0fe5dd97a86e25568ab6d9d74c7af31171f38eaff0cb0fc24300b48212ec6608ef820fc964a28db4cd8d0526d550efac04bc60f4e4cd3c224cbe0ba45f28e0566930afbca59424e39854793c6e301fdc9cedeb00500db4af5f6c2004568aa4682fc656b1943bb63", 0xf2}, 
{&(0x7f0000000500)="a1a8c7128cf53b860b5aaa1ccee4ffe846420e0192cfed6fed4eeddf537179b0a83627c3868f3b78b60a61263a90957f69337b2ab0a123218ed0318185135c095b602df299adfc458eef4947ab34782ecdbae8436486efca9d9076f2316af18200a3014efba92706cc1748f7c5925bdaa3aa4516b6f1a6f22f4f1c23d2554620b90ad856298d3087e4519d78d14dd2d1dbb2401cfd330b19ac2fc26b2b4837023a585b846cdc0e007773719a6ad0cf3acb3cde81220e1909741522d75d5c49222f1d0ef6d8bd1b58070df04e658553cb734adbdb98c8482330ed25a1d53b5b3a6c09aa436715f3535c84a943", 0xec}], 0x4) accept(0xffffffffffffffff, 0x0, &(0x7f0000000080)) socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x0, 0x803, 0x0) syz_genetlink_get_family_id$mptcp(&(0x7f0000000000), r1) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r3, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r5, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0xf2030000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x3c}}, 0x0) [ 2190.998511][ T8164] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 2191.008451][ T8185] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further 10:42:07 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) r1 = accept(0xffffffffffffffff, 0x0, 0x0) ioctl$FS_IOC_READ_VERITY_METADATA(r1, 0xc0286687, &(0x7f00000001c0)={0x1, 0x618723fd, 0x1000, &(0x7f0000000400)=""/4096}) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r2 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) bind$802154_dgram(r1, &(0x7f0000000200)={0x24, @short={0x2, 0x2, 0xaaa0}}, 0x14) r3 = syz_genetlink_get_family_id$fou(&(0x7f0000000040), r0) sendmsg$FOU_CMD_GET(r0, &(0x7f0000000180)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x400000}, 0xc, &(0x7f0000000100)={&(0x7f0000000080)={0x50, r3, 0x400, 0x70bd27, 0x25dfdbfd, {}, [@FOU_ATTR_PORT={0x6, 0x1, 0x4e22}, @FOU_ATTR_AF={0x5}, @FOU_ATTR_LOCAL_V6={0x14, 0x7, @dev={0xfe, 0x80, '\x00', 0x24}}, @FOU_ATTR_LOCAL_V6={0x14, 0x7, @private2={0xfc, 0x2, '\x00', 0x1}}, @FOU_ATTR_REMCSUM_NOPARTIAL={0x4}]}, 0x50}, 0x1, 0x0, 0x0, 0x800}, 0x4000000) sendmsg$nl_route(r2, 0x0, 0x0) 10:42:07 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$F2FS_IOC_FLUSH_DEVICE(r0, 0x4008f50a, &(0x7f0000000000)={0x8, 0x7a1}) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 10:42:07 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r0, 0x0, 0x8000000000004) (async, rerun: 64) 
setsockopt$inet_sctp_SCTP_RESET_STREAMS(r0, 0x84, 0x77, &(0x7f0000000000)={0x0, 0x6, 0x2, [0x2, 0x5]}, 0xc) (async, rerun: 64) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r1, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2191.203927][ T8193] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2191.318769][ T8198] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2191.327883][ T8164] 8021q: adding VLAN 0 to HW filter on device bond1412 [ 2191.371117][ T8167] netlink: 'syz-executor.3': attribute type 1 has an invalid length. [ 2191.479192][ T8167] 8021q: adding VLAN 0 to HW filter on device bond1450 [ 2191.579681][ T8170] bond1412: (slave bridge1275): making interface the new active one [ 2191.600516][ T8170] bond1412: (slave bridge1275): Enslaving as an active interface with an up link 10:42:07 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$F2FS_IOC_FLUSH_DEVICE(r0, 0x4008f50a, &(0x7f0000000000)={0x8, 0x7a1}) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 10:42:07 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) (async) r0 = openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) (async) r1 = accept(0xffffffffffffffff, 0x0, 0x0) ioctl$FS_IOC_READ_VERITY_METADATA(r1, 0xc0286687, &(0x7f00000001c0)={0x1, 0x618723fd, 0x1000, &(0x7f0000000400)=""/4096}) (async) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) (async) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) (async) r2 = socket$netlink(0x10, 0x3, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) (async) bind$802154_dgram(r1, &(0x7f0000000200)={0x24, @short={0x2, 0x2, 0xaaa0}}, 0x14) (async) r3 = syz_genetlink_get_family_id$fou(&(0x7f0000000040), r0) sendmsg$FOU_CMD_GET(r0, &(0x7f0000000180)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x400000}, 0xc, &(0x7f0000000100)={&(0x7f0000000080)={0x50, r3, 0x400, 0x70bd27, 0x25dfdbfd, {}, [@FOU_ATTR_PORT={0x6, 0x1, 0x4e22}, @FOU_ATTR_AF={0x5}, @FOU_ATTR_LOCAL_V6={0x14, 0x7, @dev={0xfe, 0x80, '\x00', 0x24}}, @FOU_ATTR_LOCAL_V6={0x14, 0x7, @private2={0xfc, 0x2, '\x00', 0x1}}, @FOU_ATTR_REMCSUM_NOPARTIAL={0x4}]}, 0x50}, 0x1, 0x0, 0x0, 0x800}, 0x4000000) (async) sendmsg$nl_route(r2, 0x0, 0x0) 10:42:07 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000000)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2191.628887][ T8171] bridge1345: entered promiscuous mode [ 2191.634641][ T8171] bridge1345: entered allmulticast mode [ 2191.724603][ T8171] bond1450: (slave bridge1345): making interface the new active one [ 2191.725313][ T8215] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2191.797273][ T8171] bond1450: (slave bridge1345): Enslaving as an active interface with an up link 10:42:07 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, 
&(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xfffffff0}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:42:07 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xf2030000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:42:07 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$F2FS_IOC_FLUSH_DEVICE(r0, 0x4008f50a, &(0x7f0000000000)={0x8, 0x7a1}) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) ioctl$F2FS_IOC_FLUSH_DEVICE(r0, 0x4008f50a, &(0x7f0000000000)={0x8, 0x7a1}) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) 10:42:07 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) (async) r0 = openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) (async) r1 = accept(0xffffffffffffffff, 0x0, 0x0) ioctl$FS_IOC_READ_VERITY_METADATA(r1, 0xc0286687, &(0x7f00000001c0)={0x1, 0x618723fd, 0x1000, &(0x7f0000000400)=""/4096}) (async) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) (async) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) (async) r2 = socket$netlink(0x10, 0x3, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) bind$802154_dgram(r1, &(0x7f0000000200)={0x24, @short={0x2, 0x2, 0xaaa0}}, 0x14) (async) r3 = syz_genetlink_get_family_id$fou(&(0x7f0000000040), r0) sendmsg$FOU_CMD_GET(r0, &(0x7f0000000180)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x400000}, 0xc, &(0x7f0000000100)={&(0x7f0000000080)={0x50, r3, 0x400, 0x70bd27, 0x25dfdbfd, {}, [@FOU_ATTR_PORT={0x6, 0x1, 0x4e22}, @FOU_ATTR_AF={0x5}, @FOU_ATTR_LOCAL_V6={0x14, 0x7, @dev={0xfe, 0x80, '\x00', 0x24}}, @FOU_ATTR_LOCAL_V6={0x14, 0x7, @private2={0xfc, 0x2, '\x00', 0x1}}, @FOU_ATTR_REMCSUM_NOPARTIAL={0x4}]}, 0x50}, 0x1, 0x0, 0x0, 0x800}, 0x4000000) (async) sendmsg$nl_route(r2, 0x0, 0x0) 10:42:07 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000000)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2191.891142][ T8197] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
[ 2191.912013][ T8222] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2192.014712][ T8197] 8021q: adding VLAN 0 to HW filter on device bond853 [ 2192.094259][ T8238] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2192.186995][ T8204] bond853: (slave bridge1008): making interface the new active one 10:42:08 executing program 5: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) writev(r0, &(0x7f0000000340)=[{&(0x7f0000000280)="043caa07f3a3f76d858ace87d769ded7550278376e8c4f6550d9108cbf5189deea33e10b8a7224c6107d53d01f904f58c8e83e60c4192c5a7fbd1a5f9c9fb63ca8dddee8d195355273926702d073752d80d8545e667b5c485a", 0x59}, {&(0x7f0000000100)="e48c29435b087869336f3e74363df6ea882508c8aa487ff564e800ccd724f50e3896c5d551b1a2ec6bb29187299fe2e06c11dfcb2075f9a839b34a4a6b4c2bec", 0x40}, {&(0x7f0000000400)="a318ece45becd6e47ff8375b816640bce6a9ff4d8e83ab791404d351c3ba957f2f3a0220176c17e899248cf03c8a908c4ade8de15d81e79b803df97f6fe5e817c2efde3befe034a7bc6d52a0c1caf88dc7d2077e40879876ce4aa8d6273615939a97526b8cb409d0c58f8075db16b3aed80bd44efb660be17d9db07a801a43b59a034e9998a5d60b072ab0fe5dd97a86e25568ab6d9d74c7af31171f38eaff0cb0fc24300b48212ec6608ef820fc964a28db4cd8d0526d550efac04bc60f4e4cd3c224cbe0ba45f28e0566930afbca59424e39854793c6e301fdc9cedeb00500db4af5f6c2004568aa4682fc656b1943bb63", 0xf2}, {&(0x7f0000000500)="a1a8c7128cf53b860b5aaa1ccee4ffe846420e0192cfed6fed4eeddf537179b0a83627c3868f3b78b60a61263a90957f69337b2ab0a123218ed0318185135c095b602df299adfc458eef4947ab34782ecdbae8436486efca9d9076f2316af18200a3014efba92706cc1748f7c5925bdaa3aa4516b6f1a6f22f4f1c23d2554620b90ad856298d3087e4519d78d14dd2d1dbb2401cfd330b19ac2fc26b2b4837023a585b846cdc0e007773719a6ad0cf3acb3cde81220e1909741522d75d5c49222f1d0ef6d8bd1b58070df04e658553cb734adbdb98c8482330ed25a1d53b5b3a6c09aa436715f3535c84a943", 0xec}], 0x4) accept(0xffffffffffffffff, 0x0, &(0x7f0000000080)) socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x0, 0x803, 0x0) syz_genetlink_get_family_id$mptcp(&(0x7f0000000000), r1) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r3, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r5, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0xf4000000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x3c}}, 0x0) 10:42:08 executing program 2: r0 = socket$inet6_udp(0xa, 0x2, 0x0) r1 = socket$nl_generic(0x10, 0x3, 0x10) r2 = syz_genetlink_get_family_id$l2tp(&(0x7f00000000c0), 0xffffffffffffffff) sendmsg$L2TP_CMD_SESSION_GET(r1, &(0x7f0000000180)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x400}, 0xc, &(0x7f0000000140)={&(0x7f0000000100)={0x2c, r2, 0x400, 0x70bd25, 0x25dfdbfe, 
{}, [@L2TP_ATTR_CONN_ID={0x8}, @L2TP_ATTR_CONN_ID={0x8, 0x9, 0x1}, @L2TP_ATTR_PEER_CONN_ID={0x8, 0xa, 0x3}]}, 0x2c}, 0x1, 0x0, 0x0, 0x41}, 0x2000000) ioctl$sock_SIOCGIFVLAN_SET_VLAN_NAME_TYPE_CMD(r0, 0x8982, &(0x7f0000000000)={0x6, 'xfrm0\x00', {0x7ff}}) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r3, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 10:42:08 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) r0 = bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000080)={0x6, 0x4, &(0x7f0000000b40)=ANY=[@ANYBLOB="18020000008e300000000000000000000000850000007b00000095000000000000003a64d725989c4db16f3baf71e8e1e0e0fda53265e4986af988aab70c9f5bf25e11ea397e02a20816d06d8cf70fb1ce0f722145d23429d9e7cfb38f06d6534257aa696d61d62cf6a1a78b21361809b6a0395715bdb64fb487cb0fad8a71b0bbd7cd3d179cd7e2adb6b26e449c1d44b4b691a0aa23604699a34918f4f3bdb02390cab447693161cfe5ce5e07eae50ceb073b257290594a250afbbfdd2d0fedeca5db427aaf183a78f82a56aa9868077e2bb77d3962eb7c76d84b5a07fea66f1738894a4ea1ccc9e9049ef948b9e97de4f14e721fa1ba5af84ad4"], &(0x7f0000000100)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) bpf$BPF_PROG_TEST_RUN(0xa, &(0x7f0000000000)={r0, 0xf000, 0x0, 0x41, 0x0, 0x0, 0x41, 0x0, 0x0, 0x0, 0x0, 0x0}, 0x48) r1 = socket$inet_sctp(0x2, 0x1, 0x84) getsockopt$inet_sctp_SCTP_MAX_BURST(r1, 0x84, 0xd, &(0x7f0000000000)=@assoc_value, &(0x7f00000000c0)=0x8) accept(r1, 0x0, &(0x7f0000000580)) socket(0x1e, 0x3, 0x5) ioctl$F2FS_IOC_MOVE_RANGE(r0, 0xc020f509, &(0x7f0000000140)={r0}) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(r0, 0x81f8943c, &(0x7f0000000180)) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r2 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r2, 0x0, 0x0) pipe(&(0x7f0000000140)={0xffffffffffffffff}) close(r3) accept4(0xffffffffffffffff, &(0x7f00000005c0)=@qipcrtr, &(0x7f0000000640)=0x80, 0x80800) sendmsg$IPVS_CMD_DEL_SERVICE(r3, &(0x7f0000000540)={&(0x7f0000000440)={0x10, 0x0, 0x0, 0x8000000}, 0xc, &(0x7f0000000500)={&(0x7f0000000480)={0x78, 0x0, 0x400, 0x70bd2b, 0x25dfdbfe, {}, [@IPVS_CMD_ATTR_TIMEOUT_UDP={0x8, 0x6, 0x1d2}, @IPVS_CMD_ATTR_TIMEOUT_TCP_FIN={0x8, 0x5, 0x101}, @IPVS_CMD_ATTR_TIMEOUT_TCP={0x8, 0x4, 0x28}, @IPVS_CMD_ATTR_DEST={0x4}, @IPVS_CMD_ATTR_DEST={0x48, 0x2, 0x0, 0x1, [@IPVS_DEST_ATTR_ADDR={0x14, 0x1, @ipv6=@dev={0xfe, 0x80, '\x00', 0x3b}}, @IPVS_DEST_ATTR_L_THRESH={0x8, 0x6, 0x192}, @IPVS_DEST_ATTR_ACTIVE_CONNS={0x8, 0x7, 0x10000}, @IPVS_DEST_ATTR_PORT={0x6, 0x2, 0x4e20}, @IPVS_DEST_ATTR_PERSIST_CONNS={0x8, 0x9, 0x95}, @IPVS_DEST_ATTR_ADDR_FAMILY={0x6, 0xb, 0xa}, @IPVS_DEST_ATTR_TUN_PORT={0x6, 0xe, 0x4e23}]}]}, 0x78}, 0x1, 0x0, 0x0, 0x533534bd8527f300}, 0x881) getsockname$packet(r3, &(0x7f0000000380), &(0x7f0000000400)=0x14) 10:42:08 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000000)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2192.228769][ T8204] bond853: (slave bridge1008): Enslaving as an active interface with an up link [ 2192.241151][ T8221] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
[ 2192.342357][ T8246] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2192.379745][ T8221] 8021q: adding VLAN 0 to HW filter on device bond1413 10:42:08 executing program 4: r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000140)='./cgroup.net/syz0\x00', 0x200002, 0x0) r1 = openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) r2 = openat$cgroup_ro(r0, &(0x7f00000000c0)='blkio.bfq.io_queued_recursive\x00', 0x0, 0x0) r3 = openat$cgroup_ro(r2, &(0x7f0000000100)='memory.stat\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r3, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) r4 = socket$inet6(0xa, 0x3, 0x1000) r5 = socket$nl_route(0x10, 0x3, 0x0) r6 = socket(0x1, 0x803, 0x0) getsockname$packet(r6, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r5, &(0x7f0000000280)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000380)=ANY=[@ANYBLOB="3c0000001000030400"/20, @ANYRES32=0x0, @ANYBLOB="00000000000000001400238009000100766c616e000000000400028008000400ebcd0f89de456ba2adba5bc0cbb38b86f51d2444d23dbcb1d7d7a96ffabda537ba2479c1b7939feaa8a9ed792509f03e47cb4cc634e742052dd5fdd7e36d80dc45c3a7194f5028415765c85a83f5ddd226ef979ef7eee3897192806f5e34648413245e4ec6f47524a92251b28df9d14364ef485ed176172c4061c3ecbd0f37e5227cff413208ad843af68aa6f700b486a03bffa54bd1878d852873ae42fd58f5a642ebba42bcaceb382b8611f1c5c508c40f61c39628dd64b289b9bf00cfeb30d006e3570be09f1805f1f5d5ac48d2bbf2a2476ed0b52f9d0ec82dcc742cb59d4eee99f875c4e867e4ba3fca6a9952932c25ba5b2728355459b7665b64f2874f", @ANYRES32=r7, @ANYBLOB], 0x3c}}, 0x40000) sendmsg$nl_route_sched(r1, &(0x7f0000000240)={&(0x7f0000000180), 0xc, &(0x7f0000000200)={&(0x7f00000002c0)=ANY=[@ANYBLOB="34000000660000012dbd7000fedbdf250000000087de1b3013ae25f4ae0850d462d667a972fac0a18ecb8b7d3e87273d599e423c083f844f47686bbc623df8b07c0c4dac14e6e4e658da36ecc93ba5ea93a1ba37154d032abc1c82e97bcaa3d058481b478fa88168e6d49b8c68e80efb3e7465293f25e3dd521267bb94c530d144c12b5295f1a3916134a3a4eef4c3ebe026540dd81093c885b2f041523d", @ANYRES32=r7, @ANYBLOB="f1ff0000080005000d00020008000b000800000008000b0007000000"], 0x34}, 0x1, 0x0, 0x0, 0x40040b0}, 0x4800) ioctl$sock_kcm_SIOCKCMCLONE(r2, 0x89e2, &(0x7f00000001c0)={r1}) getsockopt$inet_sctp_SCTP_I_WANT_MAPPED_V4_ADDR(r8, 0x84, 0xc, &(0x7f00000004c0), &(0x7f0000000500)=0x4) r9 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r9, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) preadv(r9, &(0x7f00000007c0)=[{&(0x7f0000000540)=""/114, 0x72}, {&(0x7f00000005c0)=""/233, 0xe9}, {&(0x7f00000006c0)=""/54, 0x36}, {&(0x7f0000000700)=""/158, 0x9e}], 0x4, 0x100, 0xfffffff7) getsockopt$inet_sctp_SCTP_PR_ASSOC_STATUS(0xffffffffffffffff, 0x84, 0x73, &(0x7f0000000000)={0x0, 0x3, 0x0, 0x81, 0x1000}, &(0x7f0000000080)=0x18) mmap(&(0x7f0000ffc000/0x4000)=nil, 0x4000, 0x0, 0x810, r4, 0x14b70000) 10:42:08 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) r0 = bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000080)={0x6, 0x4, 
&(0x7f0000000b40)=ANY=[@ANYBLOB="18020000008e300000000000000000000000850000007b00000095000000000000003a64d725989c4db16f3baf71e8e1e0e0fda53265e4986af988aab70c9f5bf25e11ea397e02a20816d06d8cf70fb1ce0f722145d23429d9e7cfb38f06d6534257aa696d61d62cf6a1a78b21361809b6a0395715bdb64fb487cb0fad8a71b0bbd7cd3d179cd7e2adb6b26e449c1d44b4b691a0aa23604699a34918f4f3bdb02390cab447693161cfe5ce5e07eae50ceb073b257290594a250afbbfdd2d0fedeca5db427aaf183a78f82a56aa9868077e2bb77d3962eb7c76d84b5a07fea66f1738894a4ea1ccc9e9049ef948b9e97de4f14e721fa1ba5af84ad4"], &(0x7f0000000100)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) bpf$BPF_PROG_TEST_RUN(0xa, &(0x7f0000000000)={r0, 0xf000, 0x0, 0x41, 0x0, 0x0, 0x41, 0x0, 0x0, 0x0, 0x0, 0x0}, 0x48) (async) bpf$BPF_PROG_TEST_RUN(0xa, &(0x7f0000000000)={r0, 0xf000, 0x0, 0x41, 0x0, 0x0, 0x41, 0x0, 0x0, 0x0, 0x0, 0x0}, 0x48) r1 = socket$inet_sctp(0x2, 0x1, 0x84) getsockopt$inet_sctp_SCTP_MAX_BURST(r1, 0x84, 0xd, &(0x7f0000000000)=@assoc_value, &(0x7f00000000c0)=0x8) (async) getsockopt$inet_sctp_SCTP_MAX_BURST(r1, 0x84, 0xd, &(0x7f0000000000)=@assoc_value, &(0x7f00000000c0)=0x8) accept(r1, 0x0, &(0x7f0000000580)) (async) accept(r1, 0x0, &(0x7f0000000580)) socket(0x1e, 0x3, 0x5) ioctl$F2FS_IOC_MOVE_RANGE(r0, 0xc020f509, &(0x7f0000000140)={r0}) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(r0, 0x81f8943c, &(0x7f0000000180)) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$netlink(0x10, 0x3, 0x0) (async) r2 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r2, 0x0, 0x0) (async) sendmsg$nl_route(r2, 0x0, 0x0) pipe(&(0x7f0000000140)={0xffffffffffffffff}) close(r3) accept4(0xffffffffffffffff, &(0x7f00000005c0)=@qipcrtr, &(0x7f0000000640)=0x80, 0x80800) sendmsg$IPVS_CMD_DEL_SERVICE(r3, &(0x7f0000000540)={&(0x7f0000000440)={0x10, 0x0, 0x0, 0x8000000}, 0xc, &(0x7f0000000500)={&(0x7f0000000480)={0x78, 0x0, 0x400, 0x70bd2b, 0x25dfdbfe, {}, [@IPVS_CMD_ATTR_TIMEOUT_UDP={0x8, 0x6, 0x1d2}, @IPVS_CMD_ATTR_TIMEOUT_TCP_FIN={0x8, 0x5, 0x101}, @IPVS_CMD_ATTR_TIMEOUT_TCP={0x8, 0x4, 0x28}, @IPVS_CMD_ATTR_DEST={0x4}, @IPVS_CMD_ATTR_DEST={0x48, 0x2, 0x0, 0x1, [@IPVS_DEST_ATTR_ADDR={0x14, 0x1, @ipv6=@dev={0xfe, 0x80, '\x00', 0x3b}}, @IPVS_DEST_ATTR_L_THRESH={0x8, 0x6, 0x192}, @IPVS_DEST_ATTR_ACTIVE_CONNS={0x8, 0x7, 0x10000}, @IPVS_DEST_ATTR_PORT={0x6, 0x2, 0x4e20}, @IPVS_DEST_ATTR_PERSIST_CONNS={0x8, 0x9, 0x95}, @IPVS_DEST_ATTR_ADDR_FAMILY={0x6, 0xb, 0xa}, @IPVS_DEST_ATTR_TUN_PORT={0x6, 0xe, 0x4e23}]}]}, 0x78}, 0x1, 0x0, 0x0, 0x533534bd8527f300}, 0x881) getsockname$packet(r3, &(0x7f0000000380), &(0x7f0000000400)=0x14) (async) getsockname$packet(r3, &(0x7f0000000380), &(0x7f0000000400)=0x14) [ 2192.582065][ T8224] bond1413: (slave bridge1276): making interface the new active one [ 2192.643205][ T8224] bond1413: (slave bridge1276): Enslaving as an active interface with an up link [ 2192.660368][ T8228] netlink: 'syz-executor.3': attribute type 1 has an invalid length. 
10:42:08 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xf4030000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2192.774584][ T8228] 8021q: adding VLAN 0 to HW filter on device bond1451 [ 2192.825602][ T8237] bridge1346: entered promiscuous mode [ 2192.831225][ T8237] bridge1346: entered allmulticast mode [ 2192.880469][ T8237] bond1451: (slave bridge1346): making interface the new active one [ 2192.909642][ T8237] bond1451: (slave bridge1346): Enslaving as an active interface with an up link 10:42:09 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xfffffffe}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:42:09 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) (async) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) (async) accept(0xffffffffffffffff, 0x0, 0x0) r0 = bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000080)={0x6, 0x4, &(0x7f0000000b40)=ANY=[@ANYBLOB="18020000008e300000000000000000000000850000007b00000095000000000000003a64d725989c4db16f3baf71e8e1e0e0fda53265e4986af988aab70c9f5bf25e11ea397e02a20816d06d8cf70fb1ce0f722145d23429d9e7cfb38f06d6534257aa696d61d62cf6a1a78b21361809b6a0395715bdb64fb487cb0fad8a71b0bbd7cd3d179cd7e2adb6b26e449c1d44b4b691a0aa23604699a34918f4f3bdb02390cab447693161cfe5ce5e07eae50ceb073b257290594a250afbbfdd2d0fedeca5db427aaf183a78f82a56aa9868077e2bb77d3962eb7c76d84b5a07fea66f1738894a4ea1ccc9e9049ef948b9e97de4f14e721fa1ba5af84ad4"], &(0x7f0000000100)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) bpf$BPF_PROG_TEST_RUN(0xa, &(0x7f0000000000)={r0, 0xf000, 0x0, 0x41, 0x0, 0x0, 0x41, 0x0, 0x0, 0x0, 0x0, 0x0}, 0x48) (async) r1 = socket$inet_sctp(0x2, 0x1, 0x84) getsockopt$inet_sctp_SCTP_MAX_BURST(r1, 0x84, 0xd, &(0x7f0000000000)=@assoc_value, &(0x7f00000000c0)=0x8) (async) accept(r1, 0x0, &(0x7f0000000580)) (async) socket(0x1e, 0x3, 0x5) (async) ioctl$F2FS_IOC_MOVE_RANGE(r0, 0xc020f509, &(0x7f0000000140)={r0}) (async) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(r0, 0x81f8943c, 
&(0x7f0000000180)) (async) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) (async) r2 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) (async) sendmsg$nl_route(r2, 0x0, 0x0) (async) pipe(&(0x7f0000000140)={0xffffffffffffffff}) close(r3) (async) accept4(0xffffffffffffffff, &(0x7f00000005c0)=@qipcrtr, &(0x7f0000000640)=0x80, 0x80800) (async) sendmsg$IPVS_CMD_DEL_SERVICE(r3, &(0x7f0000000540)={&(0x7f0000000440)={0x10, 0x0, 0x0, 0x8000000}, 0xc, &(0x7f0000000500)={&(0x7f0000000480)={0x78, 0x0, 0x400, 0x70bd2b, 0x25dfdbfe, {}, [@IPVS_CMD_ATTR_TIMEOUT_UDP={0x8, 0x6, 0x1d2}, @IPVS_CMD_ATTR_TIMEOUT_TCP_FIN={0x8, 0x5, 0x101}, @IPVS_CMD_ATTR_TIMEOUT_TCP={0x8, 0x4, 0x28}, @IPVS_CMD_ATTR_DEST={0x4}, @IPVS_CMD_ATTR_DEST={0x48, 0x2, 0x0, 0x1, [@IPVS_DEST_ATTR_ADDR={0x14, 0x1, @ipv6=@dev={0xfe, 0x80, '\x00', 0x3b}}, @IPVS_DEST_ATTR_L_THRESH={0x8, 0x6, 0x192}, @IPVS_DEST_ATTR_ACTIVE_CONNS={0x8, 0x7, 0x10000}, @IPVS_DEST_ATTR_PORT={0x6, 0x2, 0x4e20}, @IPVS_DEST_ATTR_PERSIST_CONNS={0x8, 0x9, 0x95}, @IPVS_DEST_ATTR_ADDR_FAMILY={0x6, 0xb, 0xa}, @IPVS_DEST_ATTR_TUN_PORT={0x6, 0xe, 0x4e23}]}]}, 0x78}, 0x1, 0x0, 0x0, 0x533534bd8527f300}, 0x881) (async) getsockname$packet(r3, &(0x7f0000000380), &(0x7f0000000400)=0x14) 10:42:09 executing program 2: r0 = socket$inet6_udp(0xa, 0x2, 0x0) r1 = socket$nl_generic(0x10, 0x3, 0x10) (async) r2 = syz_genetlink_get_family_id$l2tp(&(0x7f00000000c0), 0xffffffffffffffff) sendmsg$L2TP_CMD_SESSION_GET(r1, &(0x7f0000000180)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x400}, 0xc, &(0x7f0000000140)={&(0x7f0000000100)={0x2c, r2, 0x400, 0x70bd25, 0x25dfdbfe, {}, [@L2TP_ATTR_CONN_ID={0x8}, @L2TP_ATTR_CONN_ID={0x8, 0x9, 0x1}, @L2TP_ATTR_PEER_CONN_ID={0x8, 0xa, 0x3}]}, 0x2c}, 0x1, 0x0, 0x0, 0x41}, 0x2000000) ioctl$sock_SIOCGIFVLAN_SET_VLAN_NAME_TYPE_CMD(r0, 0x8982, &(0x7f0000000000)={0x6, 'xfrm0\x00', {0x7ff}}) (async) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r3, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2192.990451][ T8249] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
10:42:09 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) ioctl$F2FS_IOC_MOVE_RANGE(r0, 0xc020f509, &(0x7f0000000000)={r0, 0xc, 0x100000000, 0x1}) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r2, 0x0, 0x8000000000004) pipe(&(0x7f0000000080)={0xffffffffffffffff, 0xffffffffffffffff}) openat$cgroup_ro(r1, &(0x7f00000000c0)='blkio.bfq.io_service_bytes_recursive\x00', 0x0, 0x0) r5 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r5, 0x0, 0x8000000000004) accept4(r3, &(0x7f0000000180)=@pppol2tpv3={0x18, 0x1, {0x0, 0xffffffffffffffff, {0x2, 0x0, @initdev}}}, &(0x7f0000000100)=0x80, 0x0) accept4$alg(r4, 0x0, 0x0, 0x800) pipe(&(0x7f0000000200)) pipe(&(0x7f0000000240)={0xffffffffffffffff}) syz_genetlink_get_family_id$nl80211(0x0, r6) r7 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r7, 0x0, 0x0) [ 2193.124091][ T8249] 8021q: adding VLAN 0 to HW filter on device bond854 10:42:09 executing program 4: r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000140)='./cgroup.net/syz0\x00', 0x200002, 0x0) r1 = openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) r2 = openat$cgroup_ro(r0, &(0x7f00000000c0)='blkio.bfq.io_queued_recursive\x00', 0x0, 0x0) r3 = openat$cgroup_ro(r2, &(0x7f0000000100)='memory.stat\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r3, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) r4 = socket$inet6(0xa, 0x3, 0x1000) r5 = socket$nl_route(0x10, 0x3, 0x0) r6 = socket(0x1, 0x803, 0x0) getsockname$packet(r6, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r5, &(0x7f0000000280)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000380)=ANY=[@ANYBLOB="3c0000001000030400"/20, @ANYRES32=0x0, @ANYBLOB="00000000000000001400238009000100766c616e000000000400028008000400ebcd0f89de456ba2adba5bc0cbb38b86f51d2444d23dbcb1d7d7a96ffabda537ba2479c1b7939feaa8a9ed792509f03e47cb4cc634e742052dd5fdd7e36d80dc45c3a7194f5028415765c85a83f5ddd226ef979ef7eee3897192806f5e34648413245e4ec6f47524a92251b28df9d14364ef485ed176172c4061c3ecbd0f37e5227cff413208ad843af68aa6f700b486a03bffa54bd1878d852873ae42fd58f5a642ebba42bcaceb382b8611f1c5c508c40f61c39628dd64b289b9bf00cfeb30d006e3570be09f1805f1f5d5ac48d2bbf2a2476ed0b52f9d0ec82dcc742cb59d4eee99f875c4e867e4ba3fca6a9952932c25ba5b2728355459b7665b64f2874f", @ANYRES32=r7, @ANYBLOB], 0x3c}}, 0x40000) sendmsg$nl_route_sched(r1, &(0x7f0000000240)={&(0x7f0000000180), 0xc, &(0x7f0000000200)={&(0x7f00000002c0)=ANY=[@ANYBLOB="34000000660000012dbd7000fedbdf250000000087de1b3013ae25f4ae0850d462d667a972fac0a18ecb8b7d3e87273d599e423c083f844f47686bbc623df8b07c0c4dac14e6e4e658da36ecc93ba5ea93a1ba37154d032abc1c82e97bcaa3d058481b478fa88168e6d49b8c68e80efb3e7465293f25e3dd521267bb94c530d144c12b5295f1a3916134a3a4eef4c3ebe026540dd81093c885b2f041523d", @ANYRES32=r7, @ANYBLOB="f1ff0000080005000d00020008000b000800000008000b0007000000"], 0x34}, 0x1, 0x0, 0x0, 0x40040b0}, 0x4800) ioctl$sock_kcm_SIOCKCMCLONE(r2, 0x89e2, &(0x7f00000001c0)={r1}) getsockopt$inet_sctp_SCTP_I_WANT_MAPPED_V4_ADDR(r8, 0x84, 0xc, &(0x7f00000004c0), &(0x7f0000000500)=0x4) r9 = 
openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r9, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) preadv(r9, &(0x7f00000007c0)=[{&(0x7f0000000540)=""/114, 0x72}, {&(0x7f00000005c0)=""/233, 0xe9}, {&(0x7f00000006c0)=""/54, 0x36}, {&(0x7f0000000700)=""/158, 0x9e}], 0x4, 0x100, 0xfffffff7) getsockopt$inet_sctp_SCTP_PR_ASSOC_STATUS(0xffffffffffffffff, 0x84, 0x73, &(0x7f0000000000)={0x0, 0x3, 0x0, 0x81, 0x1000}, &(0x7f0000000080)=0x18) mmap(&(0x7f0000ffc000/0x4000)=nil, 0x4000, 0x0, 0x810, r4, 0x14b70000) openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000140)='./cgroup.net/syz0\x00', 0x200002, 0x0) (async) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) (async) openat$cgroup_ro(r0, &(0x7f00000000c0)='blkio.bfq.io_queued_recursive\x00', 0x0, 0x0) (async) openat$cgroup_ro(r2, &(0x7f0000000100)='memory.stat\x00', 0x275a, 0x0) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r3, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) socket$inet6(0xa, 0x3, 0x1000) (async) socket$nl_route(0x10, 0x3, 0x0) (async) socket(0x1, 0x803, 0x0) (async) getsockname$packet(r6, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) (async) sendmsg$nl_route(r5, &(0x7f0000000280)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000380)=ANY=[@ANYBLOB="3c0000001000030400"/20, @ANYRES32=0x0, @ANYBLOB="00000000000000001400238009000100766c616e000000000400028008000400ebcd0f89de456ba2adba5bc0cbb38b86f51d2444d23dbcb1d7d7a96ffabda537ba2479c1b7939feaa8a9ed792509f03e47cb4cc634e742052dd5fdd7e36d80dc45c3a7194f5028415765c85a83f5ddd226ef979ef7eee3897192806f5e34648413245e4ec6f47524a92251b28df9d14364ef485ed176172c4061c3ecbd0f37e5227cff413208ad843af68aa6f700b486a03bffa54bd1878d852873ae42fd58f5a642ebba42bcaceb382b8611f1c5c508c40f61c39628dd64b289b9bf00cfeb30d006e3570be09f1805f1f5d5ac48d2bbf2a2476ed0b52f9d0ec82dcc742cb59d4eee99f875c4e867e4ba3fca6a9952932c25ba5b2728355459b7665b64f2874f", @ANYRES32=r7, @ANYBLOB], 0x3c}}, 0x40000) (async) sendmsg$nl_route_sched(r1, &(0x7f0000000240)={&(0x7f0000000180), 0xc, &(0x7f0000000200)={&(0x7f00000002c0)=ANY=[@ANYBLOB="34000000660000012dbd7000fedbdf250000000087de1b3013ae25f4ae0850d462d667a972fac0a18ecb8b7d3e87273d599e423c083f844f47686bbc623df8b07c0c4dac14e6e4e658da36ecc93ba5ea93a1ba37154d032abc1c82e97bcaa3d058481b478fa88168e6d49b8c68e80efb3e7465293f25e3dd521267bb94c530d144c12b5295f1a3916134a3a4eef4c3ebe026540dd81093c885b2f041523d", @ANYRES32=r7, @ANYBLOB="f1ff0000080005000d00020008000b000800000008000b0007000000"], 0x34}, 0x1, 0x0, 0x0, 0x40040b0}, 0x4800) (async) ioctl$sock_kcm_SIOCKCMCLONE(r2, 0x89e2, &(0x7f00000001c0)={r1}) (async) getsockopt$inet_sctp_SCTP_I_WANT_MAPPED_V4_ADDR(r8, 0x84, 0xc, &(0x7f00000004c0), &(0x7f0000000500)=0x4) (async) openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) (async) openat$cgroup_ro(r9, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) (async) preadv(r9, &(0x7f00000007c0)=[{&(0x7f0000000540)=""/114, 0x72}, {&(0x7f00000005c0)=""/233, 0xe9}, {&(0x7f00000006c0)=""/54, 0x36}, {&(0x7f0000000700)=""/158, 0x9e}], 0x4, 0x100, 0xfffffff7) (async) getsockopt$inet_sctp_SCTP_PR_ASSOC_STATUS(0xffffffffffffffff, 0x84, 0x73, &(0x7f0000000000)={0x0, 0x3, 0x0, 0x81, 0x1000}, &(0x7f0000000080)=0x18) (async) mmap(&(0x7f0000ffc000/0x4000)=nil, 0x4000, 0x0, 0x810, r4, 0x14b70000) (async) 10:42:09 executing program 5: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 
0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) writev(r0, &(0x7f0000000340)=[{&(0x7f0000000280)="043caa07f3a3f76d858ace87d769ded7550278376e8c4f6550d9108cbf5189deea33e10b8a7224c6107d53d01f904f58c8e83e60c4192c5a7fbd1a5f9c9fb63ca8dddee8d195355273926702d073752d80d8545e667b5c485a", 0x59}, {&(0x7f0000000100)="e48c29435b087869336f3e74363df6ea882508c8aa487ff564e800ccd724f50e3896c5d551b1a2ec6bb29187299fe2e06c11dfcb2075f9a839b34a4a6b4c2bec", 0x40}, {&(0x7f0000000400)="a318ece45becd6e47ff8375b816640bce6a9ff4d8e83ab791404d351c3ba957f2f3a0220176c17e899248cf03c8a908c4ade8de15d81e79b803df97f6fe5e817c2efde3befe034a7bc6d52a0c1caf88dc7d2077e40879876ce4aa8d6273615939a97526b8cb409d0c58f8075db16b3aed80bd44efb660be17d9db07a801a43b59a034e9998a5d60b072ab0fe5dd97a86e25568ab6d9d74c7af31171f38eaff0cb0fc24300b48212ec6608ef820fc964a28db4cd8d0526d550efac04bc60f4e4cd3c224cbe0ba45f28e0566930afbca59424e39854793c6e301fdc9cedeb00500db4af5f6c2004568aa4682fc656b1943bb63", 0xf2}, {&(0x7f0000000500)="a1a8c7128cf53b860b5aaa1ccee4ffe846420e0192cfed6fed4eeddf537179b0a83627c3868f3b78b60a61263a90957f69337b2ab0a123218ed0318185135c095b602df299adfc458eef4947ab34782ecdbae8436486efca9d9076f2316af18200a3014efba92706cc1748f7c5925bdaa3aa4516b6f1a6f22f4f1c23d2554620b90ad856298d3087e4519d78d14dd2d1dbb2401cfd330b19ac2fc26b2b4837023a585b846cdc0e007773719a6ad0cf3acb3cde81220e1909741522d75d5c49222f1d0ef6d8bd1b58070df04e658553cb734adbdb98c8482330ed25a1d53b5b3a6c09aa436715f3535c84a943", 0xec}], 0x4) accept(0xffffffffffffffff, 0x0, &(0x7f0000000080)) socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x0, 0x803, 0x0) syz_genetlink_get_family_id$mptcp(&(0x7f0000000000), r1) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r3, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r5, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0xfa030000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x3c}}, 0x0) [ 2193.201267][ T8252] bond854: (slave bridge1009): making interface the new active one [ 2193.231517][ T8252] bond854: (slave bridge1009): Enslaving as an active interface with an up link 10:42:09 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) (async) mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) ioctl$F2FS_IOC_MOVE_RANGE(r0, 0xc020f509, &(0x7f0000000000)={r0, 0xc, 0x100000000, 0x1}) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r2, 0x0, 0x8000000000004) (async) sendfile(0xffffffffffffffff, r2, 0x0, 0x8000000000004) pipe(&(0x7f0000000080)) (async) pipe(&(0x7f0000000080)={0xffffffffffffffff, 0xffffffffffffffff}) 
openat$cgroup_ro(r1, &(0x7f00000000c0)='blkio.bfq.io_service_bytes_recursive\x00', 0x0, 0x0) (async) openat$cgroup_ro(r1, &(0x7f00000000c0)='blkio.bfq.io_service_bytes_recursive\x00', 0x0, 0x0) r5 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r5, 0x0, 0x8000000000004) accept4(r3, &(0x7f0000000180)=@pppol2tpv3={0x18, 0x1, {0x0, 0xffffffffffffffff, {0x2, 0x0, @initdev}}}, &(0x7f0000000100)=0x80, 0x0) accept4$alg(r4, 0x0, 0x0, 0x800) pipe(&(0x7f0000000200)) pipe(&(0x7f0000000240)={0xffffffffffffffff}) syz_genetlink_get_family_id$nl80211(0x0, r6) socket$netlink(0x10, 0x3, 0x0) (async) r7 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r7, 0x0, 0x0) [ 2193.431862][ T8266] 8021q: adding VLAN 0 to HW filter on device bond1414 [ 2193.586706][ T8267] bond1414: (slave bridge1277): making interface the new active one [ 2193.643561][ T8267] bond1414: (slave bridge1277): Enslaving as an active interface with an up link 10:42:09 executing program 2: r0 = socket$inet6_udp(0xa, 0x2, 0x0) (async) r1 = socket$nl_generic(0x10, 0x3, 0x10) (async) r2 = syz_genetlink_get_family_id$l2tp(&(0x7f00000000c0), 0xffffffffffffffff) sendmsg$L2TP_CMD_SESSION_GET(r1, &(0x7f0000000180)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x400}, 0xc, &(0x7f0000000140)={&(0x7f0000000100)={0x2c, r2, 0x400, 0x70bd25, 0x25dfdbfe, {}, [@L2TP_ATTR_CONN_ID={0x8}, @L2TP_ATTR_CONN_ID={0x8, 0x9, 0x1}, @L2TP_ATTR_PEER_CONN_ID={0x8, 0xa, 0x3}]}, 0x2c}, 0x1, 0x0, 0x0, 0x41}, 0x2000000) (async) ioctl$sock_SIOCGIFVLAN_SET_VLAN_NAME_TYPE_CMD(r0, 0x8982, &(0x7f0000000000)={0x6, 'xfrm0\x00', {0x7ff}}) (async) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r3, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 10:42:09 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xfa030000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2193.774435][ T8280] 8021q: adding VLAN 0 to HW filter on device bond1452 10:42:09 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) (async) mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) ioctl$F2FS_IOC_MOVE_RANGE(r0, 0xc020f509, &(0x7f0000000000)={r0, 0xc, 0x100000000, 0x1}) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, 
r2, 0x0, 0x8000000000004) (async) sendfile(0xffffffffffffffff, r2, 0x0, 0x8000000000004) pipe(&(0x7f0000000080)={0xffffffffffffffff, 0xffffffffffffffff}) openat$cgroup_ro(r1, &(0x7f00000000c0)='blkio.bfq.io_service_bytes_recursive\x00', 0x0, 0x0) r5 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r5, 0x0, 0x8000000000004) (async) sendfile(0xffffffffffffffff, r5, 0x0, 0x8000000000004) accept4(r3, &(0x7f0000000180)=@pppol2tpv3={0x18, 0x1, {0x0, 0xffffffffffffffff, {0x2, 0x0, @initdev}}}, &(0x7f0000000100)=0x80, 0x0) accept4$alg(r4, 0x0, 0x0, 0x800) pipe(&(0x7f0000000200)) pipe(&(0x7f0000000240)) (async) pipe(&(0x7f0000000240)={0xffffffffffffffff}) syz_genetlink_get_family_id$nl80211(0x0, r6) (async) syz_genetlink_get_family_id$nl80211(0x0, r6) r7 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r7, 0x0, 0x0) [ 2193.861908][ T8281] bridge1347: entered promiscuous mode [ 2193.867806][ T8281] bridge1347: entered allmulticast mode [ 2193.993396][ T8281] bond1452: (slave bridge1347): making interface the new active one [ 2194.014816][ T8281] bond1452: (slave bridge1347): Enslaving as an active interface with an up link 10:42:10 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xffffffff}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:42:10 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) r0 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r0, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) ioctl$FS_IOC_RESVSP(r0, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r0, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) write$tun(r0, 0x0, 0x0) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r1, &(0x7f0000000100)=ANY=[], 
0x208e24b) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2000002, 0x28011, r1, 0x0) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) r3 = socket$nl_generic(0x10, 0x3, 0x10) sendfile(r3, r2, 0x0, 0x10000a006) r4 = gettid() r5 = syz_init_net_socket$bt_hci(0x1f, 0x3, 0x1) r6 = bpf$BPF_LINK_CREATE(0x1c, &(0x7f0000000280)={r1, r1, 0x4}, 0x10) ioctl$sock_SIOCGPGRP(r1, 0x8904, &(0x7f00000002c0)=0x0) r8 = getgid() r9 = socket$inet6_udp(0x1c, 0x2, 0x0) sendto$inet6(r9, 0x0, 0x0, 0x0, &(0x7f0000000080)={0x1c, 0x1c}, 0x1c) r10 = socket$inet6_udp(0x1c, 0x2, 0x0) sendto$inet6(r10, 0x0, 0x0, 0x0, &(0x7f0000000080)={0x1c, 0x1c, 0x0, @rand_addr=' \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01'}, 0x1c) sendmsg$netlink(r3, &(0x7f0000000400)={&(0x7f00000000c0)=@kern={0x10, 0x0, 0x0, 0x800000}, 0xc, &(0x7f0000000240)=[{&(0x7f00000004c0)={0x22d4, 0x22, 0x800, 0x70bd28, 0x25dfdbfb, "", [@typed={0x8, 0x10, 0x0, 0x0, @pid}, @nested={0x1174, 0x2, 0x0, 0x1, [@typed={0x8, 0x8d, 0x0, 0x0, @pid=r4}, @generic="cdb693b25691092fa1ed0827addb58779865b2b26a68400434a186a3227e8770c9607d9aa7f6eb0e94aac247006a52b400b0a4200bbccae6b3de57e09522fa75425af9994f5b1fbdb776eea16b26865ecd591c57bccc6e", @generic="f86c0b7669bc4867d98340c0e21471e98c427fa52edc6c8e704d038ced6d119f31a5de74ae52ccf67c8bf3fb728ddfbb6d3050952a94476fbeb7fb637fa01cce6422fb5c28345c6152f03b7010a37ca5a5", @generic="e6000ba8ead345be98bb", @generic="6928fdfdb69bdadeed3ec49abe17d3a3db8db783727f44d71b5652ef3fd50819cb4c9f6507d51685812d79c035d40b7b4761f6b716b088823f45c70d004df3dd5aa352b1401a924ced3d93c1222b14bce60385f7516dc64e6fa4531560bff169e44af3d46ac5dbafac7191f4d91a3cf10983b24ea4e67efbc9a6a260f10b646c76eafab3e217330d70afd867dca552538ed2fd0c8af1f46fad71642646caf32118c7505bb9fa63e05ab5db46466358cf633e2f4ed642b657c53607ff7ac9ba7f80743363eb9c9c26eb86823e4b0990732d66c88d990f42c11eecef41394eab17b97e2da31dc1781213f34fa7bec4b1173aa955bad9c6fb8104f7901ffc1418a254719056d256fdb680371995e785a0d7a45dcd46676ab35bca3a6a2a547ec5daa0c9791e0c80fcea59996266a2f90abbed4ef892040a28d698103978ecde3dc3d3c2ab479559a7b76f4de546ac97a50ed9e0ed955bd44eac8020d9c1f77657357a43ac6b1e55b045d624dcb419280a330f649cde63024400ab48735842edd97f9c6d498a7a9dc52cd1b6d4a57923fd10aefb1c454e7b309771c5453b9036b03eed25f254d94084a7c63f7b94c734d21bcef41fc8f31fac154ac2f5e07ae7eafcf1e2c6a01736c050febb221594258c027febc6e7649b40bc31a85068f22961700950613dd0f409dc11790cacf8af3d4c83e0048b7176ded0cfdf625a150fbd1e23b0121202409f7ec2c4daa591e5a15a21f9a06cc261c64bb90c2587a787b645b6190c1f125ee73f4ec6acf70a0fecf52b35f30e667c3eb4e5bd190382705d2db749762def655c19110821ddf59283fee5dda9ab523e447bfd2ee6a14c09a80f37d6dea43135b33bea44ba98d8cb1a32c232c61feb632dcc9b29131c8a2141d680aecaa4b47987c52e2408a82c4e6920e19225603b0220047199c3d5f36d0157b400ac5c60e0c81dd404876766a4f39742721c44c2a08ef44b1a54db54b99db766c2c88a8dcece5986dbb7cffab7d8f380a4306a8795328af7a11c1321f89103ec7a9fa9fe97a8b16b09102d0d457cf043065deb2ec464b64d769f5377ecd15614d43bbe86d4563885aa26a8be6ddf215c52ffe954b99865b4c9c3546e4c6422ce545c8b085e87b2d23b4b709137fae4ac7ea6a52f551f052b3e07e646b627a57359d910cfde8e51a83565d7c7348898d60d0450f3eae76011a2281df20d34d87076347c0150f4c4b317c4ce26ede14c6c041d460ee4907e75556f0e169da6bafc3b50d29fe84e8666fd8fb51595dd2f257841ceaedd5020d6689df58769123ce38cd9bab9e2ba8f5157e0706d997cda38a1950831ae8d493eaec57fddbed96ddaeede3a9a73e5651828cf6e035fedad1b03a12b8cd1cf7aa1fa33f9bcd4367461c92fab084cc20440cc4696371f0940fe08c0ed242f2f1977b145a063be846915365b362bb36c2af6c80b67874a07a04a349c
2edd33eaa1fb7b8ba08fa86ad10323f3162015099720d7df577afb4ee40e953bc026491bb95b23a43e9af2a803350ebb769a3831317f38ea00968303ab8678e44a87aeae5e310a907ae78552ab49e81f07919269a924eb47b903946855d110e2f6ddc89baed1d99e6b55dd33986579242fa4f623df45224c7a1e0293d1f0f04ab64fde9c34447f01122673c294789ca530340bf66129f74693870a67f7198c382cf8fd5eb7be37f82d7915a8b3b11645c73ee4e9788ef8479e4af8f19ca0a10eca9ec73f808e2b45598f45356d5be5a755a51497fe9faaa2604825f4180686296aad2851339e04a13a043877bd40ab6520ceed4d031efe18ac445755c6dc317a1dd7baa7b7444ba4602124016e6213be29fdac8c707cb34a8657679cea75348d7aab52a001383dea00500cfe3b5fad3e3a58cffa87c26b50b28316e2eb0a92f9e93bfeef7ea81f8b62ec67d9177fc1376b1edee7340957b34ee8c6a6affe48d274c029f71d73e83fb60568b636aa2e9ceb55657b6dea1c492fa350c1df160c1e5385c3fc4f1d4d08923145a8072bc9011995de304569a957c946ae0c98a27dec981a1b6b0704511539a0bbe3d76e4e093a7dedc111ce82061f1a5b6215b7ce5c54acce635fcdfe065d3bf0b2eed7f174aeb2ec4ea67e1de15bef6d745ae69db45532f5346d663d9f2aca5a61d7a4e041e34d68b8a38b7e740ed38013af5ed6b396ce5c378bb2e47df66b1ec530b5bb6c1590bc1fbfe013b9a8bf0ed21c71033f4afbcc272e6bb0ac065a8edcfdb4a4df96c76d56e0cd7ed32120594cfc431f9b636987e87138070bb4700880f82165c8259da728e547b48612acf407244b1cef159e097fe2af94f843f75297aad2f15ff289cac018f5523fbab1544dc5cce6efc335711ed6908e5ae3224b53f6edd282b88cb6f57b88084d9e1634f12e7f0f1488c8443c6250fa233e4e995a5681c1847e2b84c590f24b0b19cd0911b4e85144e602189c92ea6dee5bfc4df3c88edbfe004ad0ada8811a792460cfc2a2de21a8c38c4f2093257f2f07ceb101edf68ccde86128575e65acd0263432527b64dd0c3ae0ab2076db84abee247c33c4c6a49df17ab8d5c583f982c8ee5dba65d1c3850addd03dadeb256d48edf57c2e021858e45f92cac6d90e11f52b4ea6e384de0e8dbe18a1f4e56fb9d85a97168ca8a83f6a51b78b077051b4500bc6ce5c299c839fe0445f6c31ec14a837ad6269380945ecc12b7afc95f91a50ab933d0dde606c0a4429cbf855df538fc6b08fac720a20489230f8e042e140defa04375e89e7bcbdb67c359d9a136a3a06f04b70f0b77b3366af7845ba03480b05f548589e533545a722a2387932021181843fbce3853d9a4b7e8686af349c91abbe2d0ad4ad2f83c44620cbf589adaa6a09d6ed6cdf3b3da1bd958a3c7d4975b6db4aa46ae324e804cd305baf7be571e2180ac0c96d178a28a2bdeff9e1a3c64a8791eb9322f84ac0de17a2c48a57beb9a04b9256457138f72b76bccbcc805cb5615062cbb76e8bd9940031037838db4f8d264bc3c5c15bf577bb2d735a7e8d586d4775625b3f0ec688423eecee471f6ed0f6841896b2145cff033f80ea94d4b709cad36d775441b9e95ae5ad420075de8791f1b2bdf633789b0a9877290173e912add3cb0e6e8ad07ba231e84d95b1e77afd58c3510c5964df627d512126172f0d15968cdfcc193ce69128d9b247405e9d9f1a4afd45bf0f683a978bb8ecb365ce490ba2d248720b6a5417150a18bedcffb51bdc5665bb3abac2a8d34228c3de5b3855fd80680482dd1fe58460bc970e1031cc65ee614f6cca5d23a7bb94b37af4ccf2110e541aa2c435a85ce73ca0143d4f20c69223431a6e8699bd3dafb88d7b4329b4ff08fe8817be9c3cfad44f0eb74f47ee999fcfb431f266ded49f6d0da1088a28d000a20715ed96b11e49a2098c310d19004dbac69b4ec6376fb7357c2836c7489ea08fe499adef64ed3281c93337cdecc55cb4d484b033f3be372cc3856a16cfc04ab54c1a90ef0831015b0bff9e557c0a78bf415b8312829c9da177b6d727f546f1ed9ec44b936966ca5ec592774da0d2cc8e44b6e22722f403f2d39bab7d7bb6088ae9da619076de48b264f47bbc16c31adba2044a96f955e912c3135d3e0399141c376f575dbc3e7d2eb32acc280d78b3cfb3fa23d9e9cd6cc18d5769d0209b3409720e03d655a9cc1537bab0af784ea8a098294be5ddf878e75f424723cdf615e78a3a397c236ae71ac16369b009f03daabb1da2fb1985708b2a595bb92a1381a49ce48ca2e1c34582851761d2f83f5a2844c405e77754e593299b77c530124a0e98de3e12518f2d70d6c7bb8d0dbad697bb5b5578bbe54ae8e3199603845a513648d4b5b4ac2b9a86baead02edb911c3f2c0847034a86be5ac8527400b1bb183f47849a6f01cabc0c716d3aefbda75b4deda103cc7dfd3a81678f65e4c5f51385779e9b906a2269e279658d5d3b71ea812
6f473d8ba785231c411c846cb844dea57443533e48f06d08cc06c219010f379fb99eca97fe255ac3cc9c05908d991efb54523fc8c300eca5f2c72b3cd655b4371ada68e8f425d32e480c55282c1c1b30aed4160eff5b755e6145d247a33b250a14ce30dbe4b2dc675fab9673072f4bf87db02275bd51d7f9c5c25c1a88c63ee6ffa07335d7577c076510a73e04c59fded7a6d61cfb4f5f9d079bb3e575355657c1e77fb4f0047bbeb1af16f8c15b95faf7af79173903459a03260859c242a05673a3a93884b99a828c275f9ffdc3324551a9ff8229329c7e6fa5f8d01ff75dfdb954bd1deb8003834a735838d5e363fe79a9168b9b07f04b59f5d52aa21a9e80927d18f4ea14f2d67e47b5d1e8527379bb7e47b09bcd45e316367445043448f46da57b8d082307e8664e7616882fd913d472d551049411a3eb316612cd467d6a5986c45f19022e2660aaaed6ad6f0451dd33e1d6dd8f4c1275f3ddc159d9c63b754843ab6d9087a68445361bd8e50eabff9a99f4b3c7ae1f04222c7dfe441a7baaab5ecf5c897aad6052c455ddc96d5fd72d15ddd2a8bae7a270183c188ecf227385f4a5a479b543b189c88346d3be09fb69b9a65371a091f55cafb5fc9bf747c51751117e31c5fb2fcec8b991ec9c7eaf4dc1c57cece1ec089956d385a0478e95a01164d082653625dbf8fa04b0d0ebd1aa89c610c28d700f034423d64f5f29a3d134d4d0498631d4ba2e1acfb57f1bcbfdb7dbae9280918381357ee3bc5533fe2ba2e01c8169db0a56319ff69838b13e04958838c8ea2ebc153a509c3d0ea334014aa534acaa9aaa498dc564059df754128d6685b729c47f467e55f5557163991abf895c00ab9d3efa608616388f3a8ed8868c9bda9943bd565e471b0074c84aca32882a1e8b7aab09c2aa071d3f7ecdda7421c0393f6d748b9faabb91f0883a301619f3a5d6f2f40798444833bf7df68994f40119d8269993c16bce6bddd2c83c459e693fd9003d9bcfb6d548847507f78d1e63463b8bcd7bd9418fd0c38c1f1283a91ce8e4470cbfc75ba9258f1f256bcd0ca4201f6cc1e9b17e3f5b08a2d3416f79ca8c714f1f25fc158f7c3a56c92cf87dd825d38967ed13f06d700bf2cf75264c9549150cc8d321233215f1b6ddedbddc12a8ffd971033641875485f215eb2281083c4c3492144e0034002b785c4a9cd5ad34e7f4807f642fdff28630746d07778b889da80c251866b868cf3cd1e0cb5b4691e3b1c3b34f74ca5f2ceceffa658eaf93b2ff92337cd666e414493b102b471c3bb276ac112c9ff0848798f582399d23fe5f901813523d09cfe3ddbc1c06f3a94fc84cf6c1c8f21e0e6f34c63a35028488c06c7bdfaf9a50cd07cd7d62210966075edd326a09cdfaffb43f2056b6d997ede39bee81d1b71aed20aab934f05651c7c086b724ebefda7847f2a51a1171866963c0e68a7d0841ed65041c8cf7b2eb7b1e769bcedbf8a186c3ff7e1a3e66af9e20f34820a6a5dcf9eeab30814a932987252da61fb6a6d379948756ff528e5e87df25a57a0d3b026830679c9fb1334395ac5b37e648245f8329df59811f7d2f2366ccbebe9a33d55f1e522e2c48b60a0c1a113941a3fc130d9ccd5884c0d7ecb07cee180543360d03106af185729e4799b27f14cc8409136587485a75b55fe3c6c66cc029d690959e8bdbab82f8bf444db2ced6967957928b338ce87ba562f444ed07475747ef3a9d48ee1d4cc6c0dd2ddc8db2ca16baaac5855297bffa4b17f171f9e6f51410eb3a7e8209a4788712c74b68277e3c8", @generic="56aeacf4ef40c28cab48e5f69241399e63e8c3a1e129b5e9ffd64b7f63b641fc03d30522b50f2b47b5c026b75b141360bcbf70b38a1acdb29ee7f21722676a085fdcdb43a578c0dce21dd7d6b6fdc5298b9f3240c06ae7df4a11f66e83c961b1b03b8fc197bcfc4d4cb7133fbc79089e9e2fc420ef7a2f2bcceb9c2aef7b3e141ddd75468edff2f18e2e45a64d60204b47abb1660e380ec89edf15e56ca9a92f5365624068ada93ce548352dcb8193eedd382f75b528"]}, @nested={0x1148, 0x92, 0x0, 0x1, [@generic="a8437d1e5cef3fefd21b051b8056da3d6fc04e806b0ba86cc023e678a38d882cb7664802d6eb12fa08d9ebf1ec062b", @typed={0x8, 0x57, 0x0, 0x0, @pid=0xffffffffffffffff}, 
@generic="7537b89bb364d3ce91176e1b2e833c475c1865751773282debc78b13c92ab5bcb0bcd044246f87bed3d9f515b81684181adc61c2970a0f5652c22bde4c08f9077087040e88d13a74b3f32ce0a8c2f5c948546c96d45d814553dd404959270124eeb2b8c6f3a29fb1f2780a0bf04e6ee3b708a5d3cad5e5489580d3aa8be85c3f604ff50788bf1928c826f11e79265145cbef2aaf1485fa69c1b7a1d94cf6b60123f57fe896932cafa91d607574a1076fef64adb872c4b331e3764a2647a85fba86a1576782f05a17dfd2c9169048051e31f00a7eb003f5d2f109a2e8bb3ac71d62eb5fdccd9055ed28bbe5b111300270f322b4538c3f3260779461802eadd5d373857f1942a93213e7ef70cf275ea38314de9f4d14dc170b9c18d1c44b73d10dbbd18ce368c148b70ebf93f8219b077e97dd5e3beed1c7dce74ed99ac7372ea4b68a2da2d04ab1f777ec09b34f797caf89e86acde2188791225fe71b206a4fa0112e4bffd70dfce52d177f22720f1000fa39321b7d349ea284f79abf1b1b9623a4ddc000e7dd35a20705335dc5589bfb265366186ff2c87905a369969739033462a2c6e0addaaf8608f4f7614434d60012196403b2f744ddeb39def83171ef532603a46a317b625261a89639a077d42ba523cc46e2c00577f4541857f5833a934d769757dfd14e3d7ea370b121c90365fc77d1cc8576c0a8528afb4e5eba6ce8b828507fd5202d0f99a60917a97c698a4136ca1a8274662117721f1813a47c007253ce72681af898617756408499f43d18643e4d064f3aeb4596eb4a5f699ede6aaad86615dac2b3e3127e4a0334deb08567c37eec2777e839bb1f237ba59ff2e741831e32b5e619a8917b169e5db1abb977a46372003ae7adb92a494c28cbc1bdca7e568b05f3727c7a0d52d69f34b00005a0570bf733886432e37f65be600b822a61eeeaaa677b6c318412598c024c89ce389a3e2e1f0263483d51884558c4b3cae057a552a83655f545ce479f54a75ccbd66e3f17c3f8d234522122fdb6357b9b2b5406fbe9cd9de572917e47c806837726abddd93a10f41f7cbda8d7ca275b7b606760b22669c5b3ff70f066295c4c66e1c34388511186e3cd51bf0836f02b989d4c0adc8b45f3e089b06119c3aef17cd17e755eeceaa20460117b4cb6562978400bde5c3c63a62a48fd7a62d991001efd64a5473b3a474493acf8d8085136048105281f3b5fd8a98b47811f28b3d1868e97fb4ac083327a56c68a706b6d7d455a0d24bbb749a740b24be554a7629e1b066c2752e2dac05515d58a0443eb3b2ea871a2bdefab756ee632f9e26de9f3edf84406723b8adb544da57ca82d147ad4ce10a83a144d19e36bb060984552fde5e65e8c44f52d2ef0b560eaddc3f9851cccbe4b75bacbfd0691177abdf8dc89e1b49a47ce74ef70077c0acb827c2ba94957d3858c5f4204ec80c0f17ed7dc41c07f125fa48f11a3a4ac31e1dfba505ff1d0133ea6dbb2b91d1c6dc99434dc13f8df7f0d10547f990c387f339cac112488d4b150c49b6b8d13b581da7fb1b2cefa45c6829e048feda278a7b7e74a92ba519dd284d357d4e0515c3d2861634a87001f03798f4052d2f7c29eecfe209b069ff9f7b2f3e222fb88bbda5f7fad83d1bb93615c5fa7fce0938b12b58332110d77821b47ea06c5ed3ea0180968b5c952feac95991aa62866949262d898d4552b770b2af9ed8be41b576beb032e53e68c1f374111e322c107442cb7d92b59031d7b3c71637fb671a83c428bff1f8ec3ff2573d8b57ffd1ffee186f7d6dbc97b07eacf9e45ae4ae120f519d596cece05fae2e49502a58fc38b423d09054a428ba72117246407dbe407e872f3d13586161c365f0f2a029eaee70c3f3f6b7fc9af783f4ce67af6ddf1650c9eb7c6fca22f884b34b28a8ad5908a0ad05d58a5a60d2e2e62a578a908ca42ed12690ee14397f89b14e15bd6e4deb8ada31c455e528ee2254d2ae005d434335ba724b7afac1118d81e6e569fde85265b9a362615d5d7fb12c771bd98c24e27299006748f14332f0adb286382244e0a81ea99f112baebced304d5b4d4797ed2f1dc1482004ba6dbba0e0674ba71be5e711df55c6e9cd02be62c5cfcc82dc902b990110c808ef81f23e863c4455a76c7bd11cd90420ac9f1a7971695a42a728de5137fd944c4bd69ff2fd611803926356d9a967dc3e349d9c8f38dd016f2a8a6f6f29ea14b9d3f708d7fa469593509d4bc485dd0942f0b32e9f4ab85e5ce9cb47923b9fc635c52912cd994b5efe54ef9e1166a3942274b79e01eba3eeacdd97c0acbc2db3082e7cdad40c14907bbab6eef1ef448c831a389c8ae538837899efbd41b6cb07a3a6687c2a45b3a34856e416cdadbc064c1d19aec896e33f10a6c98dcdc75b5d8c8a4d241d84a10c227b0f1a0d148edbf21ce84cddd60fbba5605a066be13fb545c41ecedd77e64f5274c769590c812a29c5245d4b1c0fa57dd6c471a4c7d7
40af16d8cbf4888bca4c50e5c8ecb45943e919b6fa13f304dca0b48b568d61b35450c5dc89e3d30303ccc91072048ec17ae99f034742a29c49c64547da0599dc4d1a96f6811090471f3d8f9edd2aecbcafb4ebabbacfee3231fcbdd461748477c5e582e31a6c1a92a8304cd7d3a70fa94c6df633d60cb1e43ffd0064412b6ac380747212d5e762160cdae581bba13d00c1ebc2909e5d2a5c924ecc294e0e2f02acfde4ed9e8d8d95526e570aa3a1486ec7a2ab3a5d3a81f521826ea7ae023a1d76394491cce9e14f9088294766939351a5ce6f7d234e651f4198c6a9d0309df2931571a6999ae3936e0990eb2448204714de33f663917b783392a5ba81eb94878bfb4b42b63430760d8dc03c8e15073934ae1ed80543005217ef2ec37ed9440d4a871c0a64bcaa2b2721915c9a001c82def30da31cc74f204a9bb9f83e334caed635475aa437b9ee3355bc3c69a96b60b60a1d555946d06cd9ebddbb37de3ba4b59f46edea410c2297e96929b57e4d88a862749d6c9bc574e55f692b291b94a3e598349636c37dc9f7c250674dea833701c5121e920eb47375276544f0e58b51caa5d2cb789893bfefe2ec07fdc3eb28459db1037cd935ebe23c39aeacef410ea48cdbbdf7f675d0f37671fcb914a5de3feaf0e2a3b89ddffc7e1fe9f412c15669c43b3a513f62f58afc5fa59bcf752a75bd4e0e318ec3670bf7f94e3ceb0327eacb36a010b9451ec08a76bc783f97aad39904ea0b5d276f96a4575dfa456064c5beeb32e599f41c8492ee38d083581fff677f68d84abc55d0977ba7c87b3fc1c8a311cc7084ad9e82bf5313413ac476d89b59a495c0476f6c1a2a2de13180ac1fdce2e949e1d2aaa93ac1a697fc278f8dcf464101dd45e8d35c64bfb71721a1d96e3efe813f9ec931ae6fdb2fa7dd1f9d749b7bd8e95fb7904d1a3a6ce7b6d04198bb933f3255d3a104c6327fdd922b16b6419d9b56cad5ac9db74d8c824ba10dd870db6f877bee1c3f041d45bc889d7caac0b1cab9daa5de03eab31823e83642b20dd6ca05ce9aaad1ffdb8b212393322c64d9978b8bf54447259db3cac222286a9adc8bcf1415d563029cff8549cada95f593ea92be66a12f3224beb24d0dd4183ac1d3fcacc7f7bfebbdf19da71633253c1e814aa935a54b3cce796e75eb35611e08f3cbee2a6f6182afefe8c254900c99856dfc0ce1212869e4fed5c2406213de1fe8d64644fd2c46a832f86ab55a2ea4e83c4a43ea89c80e407fdbf97206a9d9bafdac087d0a74a8fecfc3aa00a4e7114bc5c06fe954eba69e1e6c56a344e4cf4fa392c7931d4c8973e3c8487cea6eee14ea1f9f2fbf7eb00e00bb7257ba6d943bdea78962bf15c77cbbda0ae4138275dd67350c513d755a3d28ea507e25a130f7853883d2783e6b38adab6ae9fb64d09fe58e7943c21026e0b11682fe3914f7dd623b3b7be8205f23619ee317bfe97e17b5c1ab06521f01af8c3093fe47a653fd8a6073a1ed131beb3964808ce8c50ade7a61cc993607a9dfd0c2e400da6e6f8d4de440243204e0d88dfe98fba8737f4f97c732ea2258bfcf8af310bd2fd6bcaa4eb31acd7ba7e4e2149670a5e966fde96d1045ece9448a40fd935d307445619a822066c87d7800dec3c7fbe8e97612a205b9f1e914862c717a0f9572a3c6a381d0636ee2a8b80ee7d8e2d32f4eae89650feaf233bd476a6aa564745849cd5827f6ccba385a812557fa383b001700aea61924906cbf23c57e83ed7be4525280f71d05fe3e0fad5079e505f9fdf2c11d19e0e4e0241f8268d810547bbc72b6f80fd23b522a489842a6ce5341a9a48ab293343d00e809af6ddba5ad3ff0e020b4edaf21afc735662c575c14ce009918681eb7651641a5d7d6341f219ad34ee69bc4d4490c777b35e335010456262d4f37f708c9e9847b579294935010b156435ffb1eabdd085d24e5af44d4bdfb43257a974cfd29bbecaddca3a4519454ce1438c1e609d83d9cc5028bdea4a98055a053f2d52b561c03ba468a4d49b3215a078f167de3e4515a97260c6a3c54357f3d4c118ddd54801b029d0dcdab3345332cbe624b462fef6ff3ffad531019de86eeb584723aa7340058c19533e3ba41b9f0cbac421191aad1682295423dfe20cb63ae46c78cef5329a63b7ee63bb94ed2b200c0aca8880d6098f64c9068d1661b60cbe22f616b11a90896435230aafc0748b6813abd9fcffdd11c29a5a0815383660e2c12ab72ed3c8fa34531bc82f3a1ca0fed474b378e70110073df7a0b96382f8671535d8cde879226a44b93f29f3b252caa224435d1a4ca7af4338677c09ac45b05ccb9b23f175ec471efe8c305a19722f507b09f16df0899161834ee8d29c8f2a333b2931e4546a4e34ec2a583fec53da1e7254f8810e492c478d6455ba4c95716dda0ce06ba0a66b0ff7ae9840c3be78ac498aac0380ff4b1943e411cac31062b7281231923ea44ed30b0409e13f88937a4e8f324bc0137956d2fcc93b7d
c92c15643fab620fdd99c94d7a0a1e77ef57eba266f880b388e1451c95af7fd6c2710bb9eec3544db03aba114b11b237398c2aed999c5a2ce60808b9d40c20ca5eecfb5cb1a4ee3c35fa0172194e3a6046d2b2200e9728a9d09885b3baa75c4ced3e0c2376f5700d8e920d85b268e4d303f58bcb577c566ca48e4f79d3da719edb850d7333c4f7102961091737757f15fa1fa376c70e7a533a62086bde95c1e09b5ea6a1df244eeb53ca837a3082d73b59dd6ec402a1e44287bb5e794f8d2d336cc467bd0a9baefa38936249987ab9f04c465c2baa91b2b9fb9bb85bd663ca976c32487ee44876e04b3990338930f30d502fc255acecfee26662381ea40a64e77eb6e630af506eef28b74df105116ab46e310a54aa5455d570a33d8e90d1fdf3380735c0eb68a9aa8464bbf850e9e14abce7256eb8e4c29347856cbcae34809e8f81afd561d9a2e6453947435defab2c85220d15775fba7065891de05526057445b24a801540328c61bffb4048a8d37c59f80994a34dff007258989a9b141ef09225eca17d710de0d1d9b15928944efc07ee30e66f32a209d3c4bc122c70acdff7bc4a5ba59bd774888e7dd09a18b262de70d25919c7255ff291c123349405d0a36e124791eb1a06accc749ea2423f7ffd0fe8aa1a24e856aa7bccc9c5a226ffa0a812f84f9c079f001999448b307d3dc7bdad516f52390072acbcdf1c5bcc8238846aec86904068309b4f6377e4f6b7c19c283a29305eb81c7044b146c05eff25dc", @typed={0x4, 0xb}, @typed={0xa, 0x22, 0x0, 0x0, @str='wlan1\x00'}, @generic="9f4a2367e21b9170c6907308320acf3f08ac9ab55f9b0b430d986d75599b874dc0d96f980a2ed797ea749df5cb3155b17996f4ba6da7dc91", @typed={0x4, 0x2b}, @typed={0x8, 0x26, 0x0, 0x0, @ipv4=@private=0xa010100}, @typed={0x8, 0x59, 0x0, 0x0, @uid=0xee01}, @generic="a9dc58f2cd45a80d0d413878f6659fa1eb94494f8ad5b3f10016748c186808c62e396ac3e858be94fc45487813e11a040b821b6f123511b8b492033f45b7211f441fda06c9f04101098477d94d11189b6d5e8d76be3d8ccaf526ed62f397255fa7486c8d6bd3c8238f149ef1a417627b285932d6d82b974c7fb9dd2b00b0aa458207b6a9e6cff097f8da7250f26901eeb4633508e3d033e386d132d0fd28fd216fcabc249d2d89c91b3a357b2a77f2be18"]}]}, 0x22d4}, {&(0x7f0000000140)={0x28, 0x39, 0x200, 0x70bd26, 0x25dfdbfd, "", [@nested={0x10, 0xf, 0x0, 0x1, [@typed={0xa, 0x46, 0x0, 0x0, @str='wlan1\x00'}]}, @typed={0x8, 0x84, 0x0, 0x0, @ipv4=@broadcast}]}, 0x28}], 0x2, &(0x7f0000000300)=[@rights={{0x14, 0x1, 0x1, [r5]}}, @rights={{0x18, 0x1, 0x1, [r6, r1]}}, @cred={{0x1c, 0x1, 0x2, {r7, 0xee01, 0xee01}}}, @cred={{0x1c, 0x1, 0x2, {0xffffffffffffffff, 0x0, r8}}}, @rights={{0x14, 0x1, 0x1, [r1]}}, @rights={{0x14, 0x1, 0x1, [r1]}}, @rights={{0x18, 0x1, 0x1, [r9, r10]}}], 0xb8, 0x800}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000100)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x40000000}, 0xc, &(0x7f00000000c0)={&(0x7f0000000040)=@newlinkprop={0x44, 0x6c, 0x200, 0x70bd2d, 0x25dfdbfe, {0x0, 0x0, 0x0, 0x0, 0x40, 0x30400}, [@IFLA_TARGET_NETNSID={0x8}, @IFLA_NET_NS_PID={0x8, 0x13, r4}, @IFLA_BROADCAST={0xa, 0x2, @multicast}, @IFLA_NET_NS_PID={0x8}]}, 0x44}, 0x1, 0x0, 0x0, 0x4000085}, 0x20040010) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r11 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r11, 0x0, 0x0) 10:42:10 executing program 4: r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000140)='./cgroup.net/syz0\x00', 0x200002, 0x0) r1 = openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) (async) r2 = openat$cgroup_ro(r0, &(0x7f00000000c0)='blkio.bfq.io_queued_recursive\x00', 0x0, 0x0) r3 = openat$cgroup_ro(r2, &(0x7f0000000100)='memory.stat\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r3, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) r4 = socket$inet6(0xa, 0x3, 0x1000) (async) r5 = socket$nl_route(0x10, 
0x3, 0x0) (async) r6 = socket(0x1, 0x803, 0x0) getsockname$packet(r6, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r5, &(0x7f0000000280)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000380)=ANY=[@ANYBLOB="3c0000001000030400"/20, @ANYRES32=0x0, @ANYBLOB="00000000000000001400238009000100766c616e000000000400028008000400ebcd0f89de456ba2adba5bc0cbb38b86f51d2444d23dbcb1d7d7a96ffabda537ba2479c1b7939feaa8a9ed792509f03e47cb4cc634e742052dd5fdd7e36d80dc45c3a7194f5028415765c85a83f5ddd226ef979ef7eee3897192806f5e34648413245e4ec6f47524a92251b28df9d14364ef485ed176172c4061c3ecbd0f37e5227cff413208ad843af68aa6f700b486a03bffa54bd1878d852873ae42fd58f5a642ebba42bcaceb382b8611f1c5c508c40f61c39628dd64b289b9bf00cfeb30d006e3570be09f1805f1f5d5ac48d2bbf2a2476ed0b52f9d0ec82dcc742cb59d4eee99f875c4e867e4ba3fca6a9952932c25ba5b2728355459b7665b64f2874f", @ANYRES32=r7, @ANYBLOB], 0x3c}}, 0x40000) (async) sendmsg$nl_route_sched(r1, &(0x7f0000000240)={&(0x7f0000000180), 0xc, &(0x7f0000000200)={&(0x7f00000002c0)=ANY=[@ANYBLOB="34000000660000012dbd7000fedbdf250000000087de1b3013ae25f4ae0850d462d667a972fac0a18ecb8b7d3e87273d599e423c083f844f47686bbc623df8b07c0c4dac14e6e4e658da36ecc93ba5ea93a1ba37154d032abc1c82e97bcaa3d058481b478fa88168e6d49b8c68e80efb3e7465293f25e3dd521267bb94c530d144c12b5295f1a3916134a3a4eef4c3ebe026540dd81093c885b2f041523d", @ANYRES32=r7, @ANYBLOB="f1ff0000080005000d00020008000b000800000008000b0007000000"], 0x34}, 0x1, 0x0, 0x0, 0x40040b0}, 0x4800) ioctl$sock_kcm_SIOCKCMCLONE(r2, 0x89e2, &(0x7f00000001c0)={r1}) getsockopt$inet_sctp_SCTP_I_WANT_MAPPED_V4_ADDR(r8, 0x84, 0xc, &(0x7f00000004c0), &(0x7f0000000500)=0x4) r9 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r9, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) (async) preadv(r9, &(0x7f00000007c0)=[{&(0x7f0000000540)=""/114, 0x72}, {&(0x7f00000005c0)=""/233, 0xe9}, {&(0x7f00000006c0)=""/54, 0x36}, {&(0x7f0000000700)=""/158, 0x9e}], 0x4, 0x100, 0xfffffff7) (async) getsockopt$inet_sctp_SCTP_PR_ASSOC_STATUS(0xffffffffffffffff, 0x84, 0x73, &(0x7f0000000000)={0x0, 0x3, 0x0, 0x81, 0x1000}, &(0x7f0000000080)=0x18) mmap(&(0x7f0000ffc000/0x4000)=nil, 0x4000, 0x0, 0x810, r4, 0x14b70000) [ 2194.181227][ T8291] 8021q: adding VLAN 0 to HW filter on device bond855 10:42:10 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='blkio.bfq.sectors\x00', 0x275a, 0x0) write$binfmt_script(r1, &(0x7f00000000c0)=ANY=[], 0x208e24b) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x200000b, 0x28011, r1, 0x0) r2 = socket$alg(0x26, 0x5, 0x0) bind$alg(r2, &(0x7f0000000000)={0x26, 'aead\x00', 0x0, 0x0, 'aegis128-generic\x00'}, 0x58) r3 = accept4(r2, 0x0, 0x0, 0x0) setsockopt$ALG_SET_KEY(r2, 0x117, 0x1, &(0x7f0000000200)="ad00"/16, 0x10) recvmmsg(r3, &(0x7f0000002440), 0x3ffffffffffff67, 0x0, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2194.393288][ T8300] bond855: (slave bridge1010): making interface the new active one [ 2194.451533][ T27] audit: type=1804 audit(1688208130.473:81): pid=8327 uid=0 auid=4294967295 ses=4294967295 subj=unconfined op=invalid_pcr cause=open_writers comm="syz-executor.0" name="/root/syzkaller-testdir2576221800/syzkaller.juiRbD/2825/cgroup.controllers" dev="sda1" ino=1972 res=1 errno=0 10:42:10 executing program 5: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 
0x0) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) writev(r0, &(0x7f0000000340)=[{&(0x7f0000000280)="043caa07f3a3f76d858ace87d769ded7550278376e8c4f6550d9108cbf5189deea33e10b8a7224c6107d53d01f904f58c8e83e60c4192c5a7fbd1a5f9c9fb63ca8dddee8d195355273926702d073752d80d8545e667b5c485a", 0x59}, {&(0x7f0000000100)="e48c29435b087869336f3e74363df6ea882508c8aa487ff564e800ccd724f50e3896c5d551b1a2ec6bb29187299fe2e06c11dfcb2075f9a839b34a4a6b4c2bec", 0x40}, {&(0x7f0000000400)="a318ece45becd6e47ff8375b816640bce6a9ff4d8e83ab791404d351c3ba957f2f3a0220176c17e899248cf03c8a908c4ade8de15d81e79b803df97f6fe5e817c2efde3befe034a7bc6d52a0c1caf88dc7d2077e40879876ce4aa8d6273615939a97526b8cb409d0c58f8075db16b3aed80bd44efb660be17d9db07a801a43b59a034e9998a5d60b072ab0fe5dd97a86e25568ab6d9d74c7af31171f38eaff0cb0fc24300b48212ec6608ef820fc964a28db4cd8d0526d550efac04bc60f4e4cd3c224cbe0ba45f28e0566930afbca59424e39854793c6e301fdc9cedeb00500db4af5f6c2004568aa4682fc656b1943bb63", 0xf2}, {&(0x7f0000000500)="a1a8c7128cf53b860b5aaa1ccee4ffe846420e0192cfed6fed4eeddf537179b0a83627c3868f3b78b60a61263a90957f69337b2ab0a123218ed0318185135c095b602df299adfc458eef4947ab34782ecdbae8436486efca9d9076f2316af18200a3014efba92706cc1748f7c5925bdaa3aa4516b6f1a6f22f4f1c23d2554620b90ad856298d3087e4519d78d14dd2d1dbb2401cfd330b19ac2fc26b2b4837023a585b846cdc0e007773719a6ad0cf3acb3cde81220e1909741522d75d5c49222f1d0ef6d8bd1b58070df04e658553cb734adbdb98c8482330ed25a1d53b5b3a6c09aa436715f3535c84a943", 0xec}], 0x4) accept(0xffffffffffffffff, 0x0, &(0x7f0000000080)) socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x0, 0x803, 0x0) syz_genetlink_get_family_id$mptcp(&(0x7f0000000000), r1) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r3, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r5, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0xfc030000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x3c}}, 0x0) 10:42:10 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) getsockopt$inet_IP_XFRM_POLICY(r0, 0x0, 0x11, &(0x7f0000000080)={{{@in6=@loopback, @in=@local, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}}, {{@in6=@local}, 0x0, @in=@local}}, &(0x7f0000000180)=0xe8) sendmsg$nl_netfilter(r0, &(0x7f00000005c0)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x2}, 0xc, 
&(0x7f0000000580)={&(0x7f0000000600)=ANY=[@ANYBLOB="a4030000120b01032cbd7000fddbdf250700000a8d036d80e0a2c85d28fb71b56034d9a9851858675b29dce619bff4ab972f6cad50e51de556424cbc5f4096328cbeddc146e7dcb0cbe89fcc9bc077db5986495f8255babdc86f1813b5ec41407fdc1a603b32e5d7eb37da3f92789ce2678db83e0846caec818966f8d9fa4c72d7740282f75b7718d25cc1906afb221e0ce3bbcabd26a64d637ae7a8d6b7062363b938b96515aae167c02b19fc9bcff5b8eccd9a0f7006042265b15222051887705ddeb6decd73862384d6f32dddbcc906aa8b9d9f423ca6af84292b5efce5269d7a3f82ad91ae61dd567c2f6399c44871b75c288464c9e5c0020286db14000000fe88000000000000000000000000010163fac1277b8d73f3386269a7af7a0ca895f517814c84ed77652577da416f45bdc2634416888309390667ea60cfea72c5b4ab13f87e470cfb30e33c8e40d9ca3839513528575caaf8baa5eb6dd7bc0503ed869a57871f997411067dfaac27f7344c8ee74b1e20420bcfdae016d6e8f3049934296eb40efe6206bedafad5b41b77edf9a3e5e6c7e07b9ae92e0f4d417a6d098690693f8f59eab09c680f8a837d925d3aac1a50a75c35df3d017d2de0e81824986394cee266672a958eea35dddc7dcd79b3f10a8396330d6009cbe626226a5cfa24d8e621452078ae9418c365da3d35158bd2a8e7e7592b604c8cc45e2e69d10a03f0177b0b7bcd272333ede0a87fb77e7acbd1c2a5f9e9c1afc5b0b8273e199d084e65858a29ee1e5d968fef68deef1c96cd4c09827e4be9499308f6b52a4c63b4ab75177470e99e2f157bf1b35344abb94375e70b2281e06c6dc7ae4ff4c89a227a6d39b82b0fe51fdc6c7afa4f7cc27083bbde2e86d17c4da4f54375fb075ccbc3867745439644e9b6197fb100ca5ea29cf1a09a769c40bc90cb42d59908008900", @ANYRES32=0x0, @ANYBLOB="04003100acc9433f9d6df5408573aad599aeff5c6dc82ed93e9f9711e19055f550293a435b05a5b540f01cd4880ccc70cdd6e2ccae9d27698332e64e7dd810bc2fa23c546df8ae7f6f00d44d4c65f3f4ceee9dffffda4d63740300c9ab9caf0cf86c45e50d52b55b16bec1dfa7d04e1278c8f9b8b028593c960ddaca119e0a54734585b8c19aa83a4433d3ed9de4da2e94423cc3e701fc8b221cb4c5d4a5da375db8fee8c7e1e8e75dfd4e734394df92bb7dc67f350ec4b5245562b7094aeffcaf98c7bdfcfa41de2a327964aed592f2b808002800225b0888d805ec4271d1fb6b6ef75011914c2b7a961a14955bc7854e89cbefcb89387984bf287915", @ANYRES32=r1, @ANYBLOB='\x00\x00\x00'], 0x3a4}}, 0x44801) [ 2194.500159][ T8300] bond855: (slave bridge1010): Enslaving as an active interface with an up link [ 2194.529655][ T8311] validate_nla: 5 callbacks suppressed [ 2194.529674][ T8311] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
10:42:10 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) getsockopt$inet_IP_XFRM_POLICY(r0, 0x0, 0x11, &(0x7f0000000080)={{{@in6=@loopback, @in=@local, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}}, {{@in6=@local}, 0x0, @in=@local}}, &(0x7f0000000180)=0xe8) sendmsg$nl_netfilter(r0, &(0x7f00000005c0)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x2}, 0xc, &(0x7f0000000580)={&(0x7f0000000600)=ANY=[@ANYBLOB="a4030000120b01032cbd7000fddbdf250700000a8d036d80e0a2c85d28fb71b56034d9a9851858675b29dce619bff4ab972f6cad50e51de556424cbc5f4096328cbeddc146e7dcb0cbe89fcc9bc077db5986495f8255babdc86f1813b5ec41407fdc1a603b32e5d7eb37da3f92789ce2678db83e0846caec818966f8d9fa4c72d7740282f75b7718d25cc1906afb221e0ce3bbcabd26a64d637ae7a8d6b7062363b938b96515aae167c02b19fc9bcff5b8eccd9a0f7006042265b15222051887705ddeb6decd73862384d6f32dddbcc906aa8b9d9f423ca6af84292b5efce5269d7a3f82ad91ae61dd567c2f6399c44871b75c288464c9e5c0020286db14000000fe88000000000000000000000000010163fac1277b8d73f3386269a7af7a0ca895f517814c84ed77652577da416f45bdc2634416888309390667ea60cfea72c5b4ab13f87e470cfb30e33c8e40d9ca3839513528575caaf8baa5eb6dd7bc0503ed869a57871f997411067dfaac27f7344c8ee74b1e20420bcfdae016d6e8f3049934296eb40efe6206bedafad5b41b77edf9a3e5e6c7e07b9ae92e0f4d417a6d098690693f8f59eab09c680f8a837d925d3aac1a50a75c35df3d017d2de0e81824986394cee266672a958eea35dddc7dcd79b3f10a8396330d6009cbe626226a5cfa24d8e621452078ae9418c365da3d35158bd2a8e7e7592b604c8cc45e2e69d10a03f0177b0b7bcd272333ede0a87fb77e7acbd1c2a5f9e9c1afc5b0b8273e199d084e65858a29ee1e5d968fef68deef1c96cd4c09827e4be9499308f6b52a4c63b4ab75177470e99e2f157bf1b35344abb94375e70b2281e06c6dc7ae4ff4c89a227a6d39b82b0fe51fdc6c7afa4f7cc27083bbde2e86d17c4da4f54375fb075ccbc3867745439644e9b6197fb100ca5ea29cf1a09a769c40bc90cb42d59908008900", @ANYRES32=0x0, @ANYBLOB="04003100acc9433f9d6df5408573aad599aeff5c6dc82ed93e9f9711e19055f550293a435b05a5b540f01cd4880ccc70cdd6e2ccae9d27698332e64e7dd810bc2fa23c546df8ae7f6f00d44d4c65f3f4ceee9dffffda4d63740300c9ab9caf0cf86c45e50d52b55b16bec1dfa7d04e1278c8f9b8b028593c960ddaca119e0a54734585b8c19aa83a4433d3ed9de4da2e94423cc3e701fc8b221cb4c5d4a5da375db8fee8c7e1e8e75dfd4e734394df92bb7dc67f350ec4b5245562b7094aeffcaf98c7bdfcfa41de2a327964aed592f2b808002800225b0888d805ec4271d1fb6b6ef75011914c2b7a961a14955bc7854e89cbefcb89387984bf287915", @ANYRES32=r1, @ANYBLOB='\x00\x00\x00'], 0x3a4}}, 0x44801) [ 2194.833113][ T8311] 8021q: adding VLAN 0 to HW filter on device bond1415 10:42:11 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) (async) accept(0xffffffffffffffff, 0x0, 0x0) r0 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r0, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) (async) ioctl$FS_IOC_RESVSP(r0, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) (async) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r0, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 
&(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) (async) write$tun(r0, 0x0, 0x0) (async) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r1, &(0x7f0000000100)=ANY=[], 0x208e24b) (async) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2000002, 0x28011, r1, 0x0) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) (async) r3 = socket$nl_generic(0x10, 0x3, 0x10) sendfile(r3, r2, 0x0, 0x10000a006) (async) r4 = gettid() (async) r5 = syz_init_net_socket$bt_hci(0x1f, 0x3, 0x1) r6 = bpf$BPF_LINK_CREATE(0x1c, &(0x7f0000000280)={r1, r1, 0x4}, 0x10) (async) ioctl$sock_SIOCGPGRP(r1, 0x8904, &(0x7f00000002c0)=0x0) r8 = getgid() (async) r9 = socket$inet6_udp(0x1c, 0x2, 0x0) sendto$inet6(r9, 0x0, 0x0, 0x0, &(0x7f0000000080)={0x1c, 0x1c}, 0x1c) r10 = socket$inet6_udp(0x1c, 0x2, 0x0) sendto$inet6(r10, 0x0, 0x0, 0x0, &(0x7f0000000080)={0x1c, 0x1c, 0x0, @rand_addr=' \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01'}, 0x1c) sendmsg$netlink(r3, &(0x7f0000000400)={&(0x7f00000000c0)=@kern={0x10, 0x0, 0x0, 0x800000}, 0xc, &(0x7f0000000240)=[{&(0x7f00000004c0)={0x22d4, 0x22, 0x800, 0x70bd28, 0x25dfdbfb, "", [@typed={0x8, 0x10, 0x0, 0x0, @pid}, @nested={0x1174, 0x2, 0x0, 0x1, [@typed={0x8, 0x8d, 0x0, 0x0, @pid=r4}, @generic="cdb693b25691092fa1ed0827addb58779865b2b26a68400434a186a3227e8770c9607d9aa7f6eb0e94aac247006a52b400b0a4200bbccae6b3de57e09522fa75425af9994f5b1fbdb776eea16b26865ecd591c57bccc6e", @generic="f86c0b7669bc4867d98340c0e21471e98c427fa52edc6c8e704d038ced6d119f31a5de74ae52ccf67c8bf3fb728ddfbb6d3050952a94476fbeb7fb637fa01cce6422fb5c28345c6152f03b7010a37ca5a5", @generic="e6000ba8ead345be98bb", @generic="6928fdfdb69bdadeed3ec49abe17d3a3db8db783727f44d71b5652ef3fd50819cb4c9f6507d51685812d79c035d40b7b4761f6b716b088823f45c70d004df3dd5aa352b1401a924ced3d93c1222b14bce60385f7516dc64e6fa4531560bff169e44af3d46ac5dbafac7191f4d91a3cf10983b24ea4e67efbc9a6a260f10b646c76eafab3e217330d70afd867dca552538ed2fd0c8af1f46fad71642646caf32118c7505bb9fa63e05ab5db46466358cf633e2f4ed642b657c53607ff7ac9ba7f80743363eb9c9c26eb86823e4b0990732d66c88d990f42c11eecef41394eab17b97e2da31dc1781213f34fa7bec4b1173aa955bad9c6fb8104f7901ffc1418a254719056d256fdb680371995e785a0d7a45dcd46676ab35bca3a6a2a547ec5daa0c9791e0c80fcea59996266a2f90abbed4ef892040a28d698103978ecde3dc3d3c2ab479559a7b76f4de546ac97a50ed9e0ed955bd44eac8020d9c1f77657357a43ac6b1e55b045d624dcb419280a330f649cde63024400ab48735842edd97f9c6d498a7a9dc52cd1b6d4a57923fd10aefb1c454e7b309771c5453b9036b03eed25f254d94084a7c63f7b94c734d21bcef41fc8f31fac154ac2f5e07ae7eafcf1e2c6a01736c050febb221594258c027febc6e7649b40bc31a85068f22961700950613dd0f409dc11790cacf8af3d4c83e0048b7176ded0cfdf625a150fbd1e23b0121202409f7ec2c4daa591e5a15a21f9a06cc261c64bb90c2587a787b645b6190c1f125ee73f4ec6acf70a0fecf52b35f30e667c3eb4e5bd190382705d2db749762def655c19110821ddf59283fee5dda9ab523e447bfd2ee6a14c09a80f37d6dea43135b33bea44ba98d8cb1a32c232c61feb632dcc9b29131c8a2141d680aecaa4b47987c52e2408a82c4e6920e19225603b0220047199c3d5f36d0157b400ac5c60e0c81dd404876766a4f39742721c44c2a08ef44b1a54db54b99db766c2c88a8dcece5986dbb7cffab7d8f380a4306a8795328af7a11c1321f89103ec7a9fa9fe97a8b16b09102d0d457cf043065deb2ec464b64d769f5377ecd15614d43bbe86d4563885aa26a8be6ddf215c52ffe954b99865b4c9c3546e4c6422ce545c8b085e87b2d23b4b709137fae4ac7ea6a52f551f052b3e07e646b627a57359d910cfde8e51a83565d7c7348898d60d0450f3eae76011a2281df20d34d87076347c0150f4c4b317c4ce26ede14c6c041d460ee4907e75556f
0e169da6bafc3b50d29fe84e8666fd8fb51595dd2f257841ceaedd5020d6689df58769123ce38cd9bab9e2ba8f5157e0706d997cda38a1950831ae8d493eaec57fddbed96ddaeede3a9a73e5651828cf6e035fedad1b03a12b8cd1cf7aa1fa33f9bcd4367461c92fab084cc20440cc4696371f0940fe08c0ed242f2f1977b145a063be846915365b362bb36c2af6c80b67874a07a04a349c2edd33eaa1fb7b8ba08fa86ad10323f3162015099720d7df577afb4ee40e953bc026491bb95b23a43e9af2a803350ebb769a3831317f38ea00968303ab8678e44a87aeae5e310a907ae78552ab49e81f07919269a924eb47b903946855d110e2f6ddc89baed1d99e6b55dd33986579242fa4f623df45224c7a1e0293d1f0f04ab64fde9c34447f01122673c294789ca530340bf66129f74693870a67f7198c382cf8fd5eb7be37f82d7915a8b3b11645c73ee4e9788ef8479e4af8f19ca0a10eca9ec73f808e2b45598f45356d5be5a755a51497fe9faaa2604825f4180686296aad2851339e04a13a043877bd40ab6520ceed4d031efe18ac445755c6dc317a1dd7baa7b7444ba4602124016e6213be29fdac8c707cb34a8657679cea75348d7aab52a001383dea00500cfe3b5fad3e3a58cffa87c26b50b28316e2eb0a92f9e93bfeef7ea81f8b62ec67d9177fc1376b1edee7340957b34ee8c6a6affe48d274c029f71d73e83fb60568b636aa2e9ceb55657b6dea1c492fa350c1df160c1e5385c3fc4f1d4d08923145a8072bc9011995de304569a957c946ae0c98a27dec981a1b6b0704511539a0bbe3d76e4e093a7dedc111ce82061f1a5b6215b7ce5c54acce635fcdfe065d3bf0b2eed7f174aeb2ec4ea67e1de15bef6d745ae69db45532f5346d663d9f2aca5a61d7a4e041e34d68b8a38b7e740ed38013af5ed6b396ce5c378bb2e47df66b1ec530b5bb6c1590bc1fbfe013b9a8bf0ed21c71033f4afbcc272e6bb0ac065a8edcfdb4a4df96c76d56e0cd7ed32120594cfc431f9b636987e87138070bb4700880f82165c8259da728e547b48612acf407244b1cef159e097fe2af94f843f75297aad2f15ff289cac018f5523fbab1544dc5cce6efc335711ed6908e5ae3224b53f6edd282b88cb6f57b88084d9e1634f12e7f0f1488c8443c6250fa233e4e995a5681c1847e2b84c590f24b0b19cd0911b4e85144e602189c92ea6dee5bfc4df3c88edbfe004ad0ada8811a792460cfc2a2de21a8c38c4f2093257f2f07ceb101edf68ccde86128575e65acd0263432527b64dd0c3ae0ab2076db84abee247c33c4c6a49df17ab8d5c583f982c8ee5dba65d1c3850addd03dadeb256d48edf57c2e021858e45f92cac6d90e11f52b4ea6e384de0e8dbe18a1f4e56fb9d85a97168ca8a83f6a51b78b077051b4500bc6ce5c299c839fe0445f6c31ec14a837ad6269380945ecc12b7afc95f91a50ab933d0dde606c0a4429cbf855df538fc6b08fac720a20489230f8e042e140defa04375e89e7bcbdb67c359d9a136a3a06f04b70f0b77b3366af7845ba03480b05f548589e533545a722a2387932021181843fbce3853d9a4b7e8686af349c91abbe2d0ad4ad2f83c44620cbf589adaa6a09d6ed6cdf3b3da1bd958a3c7d4975b6db4aa46ae324e804cd305baf7be571e2180ac0c96d178a28a2bdeff9e1a3c64a8791eb9322f84ac0de17a2c48a57beb9a04b9256457138f72b76bccbcc805cb5615062cbb76e8bd9940031037838db4f8d264bc3c5c15bf577bb2d735a7e8d586d4775625b3f0ec688423eecee471f6ed0f6841896b2145cff033f80ea94d4b709cad36d775441b9e95ae5ad420075de8791f1b2bdf633789b0a9877290173e912add3cb0e6e8ad07ba231e84d95b1e77afd58c3510c5964df627d512126172f0d15968cdfcc193ce69128d9b247405e9d9f1a4afd45bf0f683a978bb8ecb365ce490ba2d248720b6a5417150a18bedcffb51bdc5665bb3abac2a8d34228c3de5b3855fd80680482dd1fe58460bc970e1031cc65ee614f6cca5d23a7bb94b37af4ccf2110e541aa2c435a85ce73ca0143d4f20c69223431a6e8699bd3dafb88d7b4329b4ff08fe8817be9c3cfad44f0eb74f47ee999fcfb431f266ded49f6d0da1088a28d000a20715ed96b11e49a2098c310d19004dbac69b4ec6376fb7357c2836c7489ea08fe499adef64ed3281c93337cdecc55cb4d484b033f3be372cc3856a16cfc04ab54c1a90ef0831015b0bff9e557c0a78bf415b8312829c9da177b6d727f546f1ed9ec44b936966ca5ec592774da0d2cc8e44b6e22722f403f2d39bab7d7bb6088ae9da619076de48b264f47bbc16c31adba2044a96f955e912c3135d3e0399141c376f575dbc3e7d2eb32acc280d78b3cfb3fa23d9e9cd6cc18d5769d0209b3409720e03d655a9cc1537bab0af784ea8a098294be5ddf878e75f424723cdf615e78a3a397c236ae71ac16369b009f03daabb1da2fb1985708b2
a595bb92a1381a49ce48ca2e1c34582851761d2f83f5a2844c405e77754e593299b77c530124a0e98de3e12518f2d70d6c7bb8d0dbad697bb5b5578bbe54ae8e3199603845a513648d4b5b4ac2b9a86baead02edb911c3f2c0847034a86be5ac8527400b1bb183f47849a6f01cabc0c716d3aefbda75b4deda103cc7dfd3a81678f65e4c5f51385779e9b906a2269e279658d5d3b71ea8126f473d8ba785231c411c846cb844dea57443533e48f06d08cc06c219010f379fb99eca97fe255ac3cc9c05908d991efb54523fc8c300eca5f2c72b3cd655b4371ada68e8f425d32e480c55282c1c1b30aed4160eff5b755e6145d247a33b250a14ce30dbe4b2dc675fab9673072f4bf87db02275bd51d7f9c5c25c1a88c63ee6ffa07335d7577c076510a73e04c59fded7a6d61cfb4f5f9d079bb3e575355657c1e77fb4f0047bbeb1af16f8c15b95faf7af79173903459a03260859c242a05673a3a93884b99a828c275f9ffdc3324551a9ff8229329c7e6fa5f8d01ff75dfdb954bd1deb8003834a735838d5e363fe79a9168b9b07f04b59f5d52aa21a9e80927d18f4ea14f2d67e47b5d1e8527379bb7e47b09bcd45e316367445043448f46da57b8d082307e8664e7616882fd913d472d551049411a3eb316612cd467d6a5986c45f19022e2660aaaed6ad6f0451dd33e1d6dd8f4c1275f3ddc159d9c63b754843ab6d9087a68445361bd8e50eabff9a99f4b3c7ae1f04222c7dfe441a7baaab5ecf5c897aad6052c455ddc96d5fd72d15ddd2a8bae7a270183c188ecf227385f4a5a479b543b189c88346d3be09fb69b9a65371a091f55cafb5fc9bf747c51751117e31c5fb2fcec8b991ec9c7eaf4dc1c57cece1ec089956d385a0478e95a01164d082653625dbf8fa04b0d0ebd1aa89c610c28d700f034423d64f5f29a3d134d4d0498631d4ba2e1acfb57f1bcbfdb7dbae9280918381357ee3bc5533fe2ba2e01c8169db0a56319ff69838b13e04958838c8ea2ebc153a509c3d0ea334014aa534acaa9aaa498dc564059df754128d6685b729c47f467e55f5557163991abf895c00ab9d3efa608616388f3a8ed8868c9bda9943bd565e471b0074c84aca32882a1e8b7aab09c2aa071d3f7ecdda7421c0393f6d748b9faabb91f0883a301619f3a5d6f2f40798444833bf7df68994f40119d8269993c16bce6bddd2c83c459e693fd9003d9bcfb6d548847507f78d1e63463b8bcd7bd9418fd0c38c1f1283a91ce8e4470cbfc75ba9258f1f256bcd0ca4201f6cc1e9b17e3f5b08a2d3416f79ca8c714f1f25fc158f7c3a56c92cf87dd825d38967ed13f06d700bf2cf75264c9549150cc8d321233215f1b6ddedbddc12a8ffd971033641875485f215eb2281083c4c3492144e0034002b785c4a9cd5ad34e7f4807f642fdff28630746d07778b889da80c251866b868cf3cd1e0cb5b4691e3b1c3b34f74ca5f2ceceffa658eaf93b2ff92337cd666e414493b102b471c3bb276ac112c9ff0848798f582399d23fe5f901813523d09cfe3ddbc1c06f3a94fc84cf6c1c8f21e0e6f34c63a35028488c06c7bdfaf9a50cd07cd7d62210966075edd326a09cdfaffb43f2056b6d997ede39bee81d1b71aed20aab934f05651c7c086b724ebefda7847f2a51a1171866963c0e68a7d0841ed65041c8cf7b2eb7b1e769bcedbf8a186c3ff7e1a3e66af9e20f34820a6a5dcf9eeab30814a932987252da61fb6a6d379948756ff528e5e87df25a57a0d3b026830679c9fb1334395ac5b37e648245f8329df59811f7d2f2366ccbebe9a33d55f1e522e2c48b60a0c1a113941a3fc130d9ccd5884c0d7ecb07cee180543360d03106af185729e4799b27f14cc8409136587485a75b55fe3c6c66cc029d690959e8bdbab82f8bf444db2ced6967957928b338ce87ba562f444ed07475747ef3a9d48ee1d4cc6c0dd2ddc8db2ca16baaac5855297bffa4b17f171f9e6f51410eb3a7e8209a4788712c74b68277e3c8", @generic="56aeacf4ef40c28cab48e5f69241399e63e8c3a1e129b5e9ffd64b7f63b641fc03d30522b50f2b47b5c026b75b141360bcbf70b38a1acdb29ee7f21722676a085fdcdb43a578c0dce21dd7d6b6fdc5298b9f3240c06ae7df4a11f66e83c961b1b03b8fc197bcfc4d4cb7133fbc79089e9e2fc420ef7a2f2bcceb9c2aef7b3e141ddd75468edff2f18e2e45a64d60204b47abb1660e380ec89edf15e56ca9a92f5365624068ada93ce548352dcb8193eedd382f75b528"]}, @nested={0x1148, 0x92, 0x0, 0x1, [@generic="a8437d1e5cef3fefd21b051b8056da3d6fc04e806b0ba86cc023e678a38d882cb7664802d6eb12fa08d9ebf1ec062b", @typed={0x8, 0x57, 0x0, 0x0, @pid=0xffffffffffffffff}, 
@generic="7537b89bb364d3ce91176e1b2e833c475c1865751773282debc78b13c92ab5bcb0bcd044246f87bed3d9f515b81684181adc61c2970a0f5652c22bde4c08f9077087040e88d13a74b3f32ce0a8c2f5c948546c96d45d814553dd404959270124eeb2b8c6f3a29fb1f2780a0bf04e6ee3b708a5d3cad5e5489580d3aa8be85c3f604ff50788bf1928c826f11e79265145cbef2aaf1485fa69c1b7a1d94cf6b60123f57fe896932cafa91d607574a1076fef64adb872c4b331e3764a2647a85fba86a1576782f05a17dfd2c9169048051e31f00a7eb003f5d2f109a2e8bb3ac71d62eb5fdccd9055ed28bbe5b111300270f322b4538c3f3260779461802eadd5d373857f1942a93213e7ef70cf275ea38314de9f4d14dc170b9c18d1c44b73d10dbbd18ce368c148b70ebf93f8219b077e97dd5e3beed1c7dce74ed99ac7372ea4b68a2da2d04ab1f777ec09b34f797caf89e86acde2188791225fe71b206a4fa0112e4bffd70dfce52d177f22720f1000fa39321b7d349ea284f79abf1b1b9623a4ddc000e7dd35a20705335dc5589bfb265366186ff2c87905a369969739033462a2c6e0addaaf8608f4f7614434d60012196403b2f744ddeb39def83171ef532603a46a317b625261a89639a077d42ba523cc46e2c00577f4541857f5833a934d769757dfd14e3d7ea370b121c90365fc77d1cc8576c0a8528afb4e5eba6ce8b828507fd5202d0f99a60917a97c698a4136ca1a8274662117721f1813a47c007253ce72681af898617756408499f43d18643e4d064f3aeb4596eb4a5f699ede6aaad86615dac2b3e3127e4a0334deb08567c37eec2777e839bb1f237ba59ff2e741831e32b5e619a8917b169e5db1abb977a46372003ae7adb92a494c28cbc1bdca7e568b05f3727c7a0d52d69f34b00005a0570bf733886432e37f65be600b822a61eeeaaa677b6c318412598c024c89ce389a3e2e1f0263483d51884558c4b3cae057a552a83655f545ce479f54a75ccbd66e3f17c3f8d234522122fdb6357b9b2b5406fbe9cd9de572917e47c806837726abddd93a10f41f7cbda8d7ca275b7b606760b22669c5b3ff70f066295c4c66e1c34388511186e3cd51bf0836f02b989d4c0adc8b45f3e089b06119c3aef17cd17e755eeceaa20460117b4cb6562978400bde5c3c63a62a48fd7a62d991001efd64a5473b3a474493acf8d8085136048105281f3b5fd8a98b47811f28b3d1868e97fb4ac083327a56c68a706b6d7d455a0d24bbb749a740b24be554a7629e1b066c2752e2dac05515d58a0443eb3b2ea871a2bdefab756ee632f9e26de9f3edf84406723b8adb544da57ca82d147ad4ce10a83a144d19e36bb060984552fde5e65e8c44f52d2ef0b560eaddc3f9851cccbe4b75bacbfd0691177abdf8dc89e1b49a47ce74ef70077c0acb827c2ba94957d3858c5f4204ec80c0f17ed7dc41c07f125fa48f11a3a4ac31e1dfba505ff1d0133ea6dbb2b91d1c6dc99434dc13f8df7f0d10547f990c387f339cac112488d4b150c49b6b8d13b581da7fb1b2cefa45c6829e048feda278a7b7e74a92ba519dd284d357d4e0515c3d2861634a87001f03798f4052d2f7c29eecfe209b069ff9f7b2f3e222fb88bbda5f7fad83d1bb93615c5fa7fce0938b12b58332110d77821b47ea06c5ed3ea0180968b5c952feac95991aa62866949262d898d4552b770b2af9ed8be41b576beb032e53e68c1f374111e322c107442cb7d92b59031d7b3c71637fb671a83c428bff1f8ec3ff2573d8b57ffd1ffee186f7d6dbc97b07eacf9e45ae4ae120f519d596cece05fae2e49502a58fc38b423d09054a428ba72117246407dbe407e872f3d13586161c365f0f2a029eaee70c3f3f6b7fc9af783f4ce67af6ddf1650c9eb7c6fca22f884b34b28a8ad5908a0ad05d58a5a60d2e2e62a578a908ca42ed12690ee14397f89b14e15bd6e4deb8ada31c455e528ee2254d2ae005d434335ba724b7afac1118d81e6e569fde85265b9a362615d5d7fb12c771bd98c24e27299006748f14332f0adb286382244e0a81ea99f112baebced304d5b4d4797ed2f1dc1482004ba6dbba0e0674ba71be5e711df55c6e9cd02be62c5cfcc82dc902b990110c808ef81f23e863c4455a76c7bd11cd90420ac9f1a7971695a42a728de5137fd944c4bd69ff2fd611803926356d9a967dc3e349d9c8f38dd016f2a8a6f6f29ea14b9d3f708d7fa469593509d4bc485dd0942f0b32e9f4ab85e5ce9cb47923b9fc635c52912cd994b5efe54ef9e1166a3942274b79e01eba3eeacdd97c0acbc2db3082e7cdad40c14907bbab6eef1ef448c831a389c8ae538837899efbd41b6cb07a3a6687c2a45b3a34856e416cdadbc064c1d19aec896e33f10a6c98dcdc75b5d8c8a4d241d84a10c227b0f1a0d148edbf21ce84cddd60fbba5605a066be13fb545c41ecedd77e64f5274c769590c812a29c5245d4b1c0fa57dd6c471a4c7d7
40af16d8cbf4888bca4c50e5c8ecb45943e919b6fa13f304dca0b48b568d61b35450c5dc89e3d30303ccc91072048ec17ae99f034742a29c49c64547da0599dc4d1a96f6811090471f3d8f9edd2aecbcafb4ebabbacfee3231fcbdd461748477c5e582e31a6c1a92a8304cd7d3a70fa94c6df633d60cb1e43ffd0064412b6ac380747212d5e762160cdae581bba13d00c1ebc2909e5d2a5c924ecc294e0e2f02acfde4ed9e8d8d95526e570aa3a1486ec7a2ab3a5d3a81f521826ea7ae023a1d76394491cce9e14f9088294766939351a5ce6f7d234e651f4198c6a9d0309df2931571a6999ae3936e0990eb2448204714de33f663917b783392a5ba81eb94878bfb4b42b63430760d8dc03c8e15073934ae1ed80543005217ef2ec37ed9440d4a871c0a64bcaa2b2721915c9a001c82def30da31cc74f204a9bb9f83e334caed635475aa437b9ee3355bc3c69a96b60b60a1d555946d06cd9ebddbb37de3ba4b59f46edea410c2297e96929b57e4d88a862749d6c9bc574e55f692b291b94a3e598349636c37dc9f7c250674dea833701c5121e920eb47375276544f0e58b51caa5d2cb789893bfefe2ec07fdc3eb28459db1037cd935ebe23c39aeacef410ea48cdbbdf7f675d0f37671fcb914a5de3feaf0e2a3b89ddffc7e1fe9f412c15669c43b3a513f62f58afc5fa59bcf752a75bd4e0e318ec3670bf7f94e3ceb0327eacb36a010b9451ec08a76bc783f97aad39904ea0b5d276f96a4575dfa456064c5beeb32e599f41c8492ee38d083581fff677f68d84abc55d0977ba7c87b3fc1c8a311cc7084ad9e82bf5313413ac476d89b59a495c0476f6c1a2a2de13180ac1fdce2e949e1d2aaa93ac1a697fc278f8dcf464101dd45e8d35c64bfb71721a1d96e3efe813f9ec931ae6fdb2fa7dd1f9d749b7bd8e95fb7904d1a3a6ce7b6d04198bb933f3255d3a104c6327fdd922b16b6419d9b56cad5ac9db74d8c824ba10dd870db6f877bee1c3f041d45bc889d7caac0b1cab9daa5de03eab31823e83642b20dd6ca05ce9aaad1ffdb8b212393322c64d9978b8bf54447259db3cac222286a9adc8bcf1415d563029cff8549cada95f593ea92be66a12f3224beb24d0dd4183ac1d3fcacc7f7bfebbdf19da71633253c1e814aa935a54b3cce796e75eb35611e08f3cbee2a6f6182afefe8c254900c99856dfc0ce1212869e4fed5c2406213de1fe8d64644fd2c46a832f86ab55a2ea4e83c4a43ea89c80e407fdbf97206a9d9bafdac087d0a74a8fecfc3aa00a4e7114bc5c06fe954eba69e1e6c56a344e4cf4fa392c7931d4c8973e3c8487cea6eee14ea1f9f2fbf7eb00e00bb7257ba6d943bdea78962bf15c77cbbda0ae4138275dd67350c513d755a3d28ea507e25a130f7853883d2783e6b38adab6ae9fb64d09fe58e7943c21026e0b11682fe3914f7dd623b3b7be8205f23619ee317bfe97e17b5c1ab06521f01af8c3093fe47a653fd8a6073a1ed131beb3964808ce8c50ade7a61cc993607a9dfd0c2e400da6e6f8d4de440243204e0d88dfe98fba8737f4f97c732ea2258bfcf8af310bd2fd6bcaa4eb31acd7ba7e4e2149670a5e966fde96d1045ece9448a40fd935d307445619a822066c87d7800dec3c7fbe8e97612a205b9f1e914862c717a0f9572a3c6a381d0636ee2a8b80ee7d8e2d32f4eae89650feaf233bd476a6aa564745849cd5827f6ccba385a812557fa383b001700aea61924906cbf23c57e83ed7be4525280f71d05fe3e0fad5079e505f9fdf2c11d19e0e4e0241f8268d810547bbc72b6f80fd23b522a489842a6ce5341a9a48ab293343d00e809af6ddba5ad3ff0e020b4edaf21afc735662c575c14ce009918681eb7651641a5d7d6341f219ad34ee69bc4d4490c777b35e335010456262d4f37f708c9e9847b579294935010b156435ffb1eabdd085d24e5af44d4bdfb43257a974cfd29bbecaddca3a4519454ce1438c1e609d83d9cc5028bdea4a98055a053f2d52b561c03ba468a4d49b3215a078f167de3e4515a97260c6a3c54357f3d4c118ddd54801b029d0dcdab3345332cbe624b462fef6ff3ffad531019de86eeb584723aa7340058c19533e3ba41b9f0cbac421191aad1682295423dfe20cb63ae46c78cef5329a63b7ee63bb94ed2b200c0aca8880d6098f64c9068d1661b60cbe22f616b11a90896435230aafc0748b6813abd9fcffdd11c29a5a0815383660e2c12ab72ed3c8fa34531bc82f3a1ca0fed474b378e70110073df7a0b96382f8671535d8cde879226a44b93f29f3b252caa224435d1a4ca7af4338677c09ac45b05ccb9b23f175ec471efe8c305a19722f507b09f16df0899161834ee8d29c8f2a333b2931e4546a4e34ec2a583fec53da1e7254f8810e492c478d6455ba4c95716dda0ce06ba0a66b0ff7ae9840c3be78ac498aac0380ff4b1943e411cac31062b7281231923ea44ed30b0409e13f88937a4e8f324bc0137956d2fcc93b7d
c92c15643fab620fdd99c94d7a0a1e77ef57eba266f880b388e1451c95af7fd6c2710bb9eec3544db03aba114b11b237398c2aed999c5a2ce60808b9d40c20ca5eecfb5cb1a4ee3c35fa0172194e3a6046d2b2200e9728a9d09885b3baa75c4ced3e0c2376f5700d8e920d85b268e4d303f58bcb577c566ca48e4f79d3da719edb850d7333c4f7102961091737757f15fa1fa376c70e7a533a62086bde95c1e09b5ea6a1df244eeb53ca837a3082d73b59dd6ec402a1e44287bb5e794f8d2d336cc467bd0a9baefa38936249987ab9f04c465c2baa91b2b9fb9bb85bd663ca976c32487ee44876e04b3990338930f30d502fc255acecfee26662381ea40a64e77eb6e630af506eef28b74df105116ab46e310a54aa5455d570a33d8e90d1fdf3380735c0eb68a9aa8464bbf850e9e14abce7256eb8e4c29347856cbcae34809e8f81afd561d9a2e6453947435defab2c85220d15775fba7065891de05526057445b24a801540328c61bffb4048a8d37c59f80994a34dff007258989a9b141ef09225eca17d710de0d1d9b15928944efc07ee30e66f32a209d3c4bc122c70acdff7bc4a5ba59bd774888e7dd09a18b262de70d25919c7255ff291c123349405d0a36e124791eb1a06accc749ea2423f7ffd0fe8aa1a24e856aa7bccc9c5a226ffa0a812f84f9c079f001999448b307d3dc7bdad516f52390072acbcdf1c5bcc8238846aec86904068309b4f6377e4f6b7c19c283a29305eb81c7044b146c05eff25dc", @typed={0x4, 0xb}, @typed={0xa, 0x22, 0x0, 0x0, @str='wlan1\x00'}, @generic="9f4a2367e21b9170c6907308320acf3f08ac9ab55f9b0b430d986d75599b874dc0d96f980a2ed797ea749df5cb3155b17996f4ba6da7dc91", @typed={0x4, 0x2b}, @typed={0x8, 0x26, 0x0, 0x0, @ipv4=@private=0xa010100}, @typed={0x8, 0x59, 0x0, 0x0, @uid=0xee01}, @generic="a9dc58f2cd45a80d0d413878f6659fa1eb94494f8ad5b3f10016748c186808c62e396ac3e858be94fc45487813e11a040b821b6f123511b8b492033f45b7211f441fda06c9f04101098477d94d11189b6d5e8d76be3d8ccaf526ed62f397255fa7486c8d6bd3c8238f149ef1a417627b285932d6d82b974c7fb9dd2b00b0aa458207b6a9e6cff097f8da7250f26901eeb4633508e3d033e386d132d0fd28fd216fcabc249d2d89c91b3a357b2a77f2be18"]}]}, 0x22d4}, {&(0x7f0000000140)={0x28, 0x39, 0x200, 0x70bd26, 0x25dfdbfd, "", [@nested={0x10, 0xf, 0x0, 0x1, [@typed={0xa, 0x46, 0x0, 0x0, @str='wlan1\x00'}]}, @typed={0x8, 0x84, 0x0, 0x0, @ipv4=@broadcast}]}, 0x28}], 0x2, &(0x7f0000000300)=[@rights={{0x14, 0x1, 0x1, [r5]}}, @rights={{0x18, 0x1, 0x1, [r6, r1]}}, @cred={{0x1c, 0x1, 0x2, {r7, 0xee01, 0xee01}}}, @cred={{0x1c, 0x1, 0x2, {0xffffffffffffffff, 0x0, r8}}}, @rights={{0x14, 0x1, 0x1, [r1]}}, @rights={{0x14, 0x1, 0x1, [r1]}}, @rights={{0x18, 0x1, 0x1, [r9, r10]}}], 0xb8, 0x800}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000100)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x40000000}, 0xc, &(0x7f00000000c0)={&(0x7f0000000040)=@newlinkprop={0x44, 0x6c, 0x200, 0x70bd2d, 0x25dfdbfe, {0x0, 0x0, 0x0, 0x0, 0x40, 0x30400}, [@IFLA_TARGET_NETNSID={0x8}, @IFLA_NET_NS_PID={0x8, 0x13, r4}, @IFLA_BROADCAST={0xa, 0x2, @multicast}, @IFLA_NET_NS_PID={0x8}]}, 0x44}, 0x1, 0x0, 0x0, 0x4000085}, 0x20040010) (async) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r11 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) (async) sendmsg$nl_route(r11, 0x0, 0x0) 10:42:11 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) getsockopt$inet_IP_XFRM_POLICY(r0, 0x0, 0x11, &(0x7f0000000080)={{{@in6=@loopback, @in=@local, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}}, {{@in6=@local}, 0x0, @in=@local}}, &(0x7f0000000180)=0xe8) sendmsg$nl_netfilter(r0, &(0x7f00000005c0)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x2}, 0xc, 
&(0x7f0000000580)={&(0x7f0000000600)=ANY=[@ANYBLOB="a4030000120b01032cbd7000fddbdf250700000a8d036d80e0a2c85d28fb71b56034d9a9851858675b29dce619bff4ab972f6cad50e51de556424cbc5f4096328cbeddc146e7dcb0cbe89fcc9bc077db5986495f8255babdc86f1813b5ec41407fdc1a603b32e5d7eb37da3f92789ce2678db83e0846caec818966f8d9fa4c72d7740282f75b7718d25cc1906afb221e0ce3bbcabd26a64d637ae7a8d6b7062363b938b96515aae167c02b19fc9bcff5b8eccd9a0f7006042265b15222051887705ddeb6decd73862384d6f32dddbcc906aa8b9d9f423ca6af84292b5efce5269d7a3f82ad91ae61dd567c2f6399c44871b75c288464c9e5c0020286db14000000fe88000000000000000000000000010163fac1277b8d73f3386269a7af7a0ca895f517814c84ed77652577da416f45bdc2634416888309390667ea60cfea72c5b4ab13f87e470cfb30e33c8e40d9ca3839513528575caaf8baa5eb6dd7bc0503ed869a57871f997411067dfaac27f7344c8ee74b1e20420bcfdae016d6e8f3049934296eb40efe6206bedafad5b41b77edf9a3e5e6c7e07b9ae92e0f4d417a6d098690693f8f59eab09c680f8a837d925d3aac1a50a75c35df3d017d2de0e81824986394cee266672a958eea35dddc7dcd79b3f10a8396330d6009cbe626226a5cfa24d8e621452078ae9418c365da3d35158bd2a8e7e7592b604c8cc45e2e69d10a03f0177b0b7bcd272333ede0a87fb77e7acbd1c2a5f9e9c1afc5b0b8273e199d084e65858a29ee1e5d968fef68deef1c96cd4c09827e4be9499308f6b52a4c63b4ab75177470e99e2f157bf1b35344abb94375e70b2281e06c6dc7ae4ff4c89a227a6d39b82b0fe51fdc6c7afa4f7cc27083bbde2e86d17c4da4f54375fb075ccbc3867745439644e9b6197fb100ca5ea29cf1a09a769c40bc90cb42d59908008900", @ANYRES32=0x0, @ANYBLOB="04003100acc9433f9d6df5408573aad599aeff5c6dc82ed93e9f9711e19055f550293a435b05a5b540f01cd4880ccc70cdd6e2ccae9d27698332e64e7dd810bc2fa23c546df8ae7f6f00d44d4c65f3f4ceee9dffffda4d63740300c9ab9caf0cf86c45e50d52b55b16bec1dfa7d04e1278c8f9b8b028593c960ddaca119e0a54734585b8c19aa83a4433d3ed9de4da2e94423cc3e701fc8b221cb4c5d4a5da375db8fee8c7e1e8e75dfd4e734394df92bb7dc67f350ec4b5245562b7094aeffcaf98c7bdfcfa41de2a327964aed592f2b808002800225b0888d805ec4271d1fb6b6ef75011914c2b7a961a14955bc7854e89cbefcb89387984bf287915", @ANYRES32=r1, @ANYBLOB='\x00\x00\x00'], 0x3a4}}, 0x44801) [ 2195.057438][ T8315] bond1415: (slave bridge1278): making interface the new active one 10:42:11 executing program 2: pipe(&(0x7f0000000140)={0xffffffffffffffff}) close(r0) ioctl$sock_SIOCGIFVLAN_GET_VLAN_INGRESS_PRIORITY_CMD(r0, 0x8982, &(0x7f0000000000)) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r1, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2195.119177][ T8315] bond1415: (slave bridge1278): Enslaving as an active interface with an up link [ 2195.180374][ T8323] netlink: 'syz-executor.3': attribute type 1 has an invalid length. 
10:42:11 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xfe030000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2195.226061][ T8323] workqueue: Failed to create a rescuer kthread for wq "bond1453": -EINTR [ 2195.281226][ T8361] EXT4-fs warning: 3 callbacks suppressed [ 2195.281243][ T8361] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2195.355027][ T8326] bridge1348: entered promiscuous mode [ 2195.367745][ T8326] bridge1348: entered allmulticast mode 10:42:11 executing program 2: pipe(&(0x7f0000000140)={0xffffffffffffffff}) close(r0) ioctl$sock_SIOCGIFVLAN_GET_VLAN_INGRESS_PRIORITY_CMD(r0, 0x8982, &(0x7f0000000000)) (async) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r1, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2195.543505][ T8368] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further 10:42:11 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:42:11 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) (async) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) (async) r0 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r0, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) ioctl$FS_IOC_RESVSP(r0, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) (async) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r0, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) (async) write$tun(r0, 0x0, 0x0) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r1, &(0x7f0000000100)=ANY=[], 0x208e24b) (async) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2000002, 0x28011, r1, 0x0) (async, rerun: 32) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) (rerun: 32) r3 = socket$nl_generic(0x10, 0x3, 0x10) sendfile(r3, r2, 0x0, 0x10000a006) (async) r4 = gettid() (async, rerun: 64) r5 = syz_init_net_socket$bt_hci(0x1f, 0x3, 0x1) (async, rerun: 64) r6 = bpf$BPF_LINK_CREATE(0x1c, &(0x7f0000000280)={r1, r1, 0x4}, 0x10) (async, rerun: 64) ioctl$sock_SIOCGPGRP(r1, 0x8904, &(0x7f00000002c0)=0x0) (rerun: 64) r8 = getgid() (async, rerun: 32) r9 = socket$inet6_udp(0x1c, 0x2, 0x0) (rerun: 32) sendto$inet6(r9, 0x0, 0x0, 0x0, &(0x7f0000000080)={0x1c, 0x1c}, 0x1c) (async, rerun: 64) r10 = socket$inet6_udp(0x1c, 0x2, 0x0) (rerun: 64) sendto$inet6(r10, 0x0, 0x0, 0x0, &(0x7f0000000080)={0x1c, 0x1c, 0x0, @rand_addr=' \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01'}, 0x1c) (async) sendmsg$netlink(r3, &(0x7f0000000400)={&(0x7f00000000c0)=@kern={0x10, 0x0, 0x0, 0x800000}, 0xc, &(0x7f0000000240)=[{&(0x7f00000004c0)={0x22d4, 0x22, 0x800, 0x70bd28, 0x25dfdbfb, "", [@typed={0x8, 0x10, 0x0, 0x0, @pid}, @nested={0x1174, 0x2, 0x0, 0x1, [@typed={0x8, 0x8d, 0x0, 0x0, @pid=r4}, @generic="cdb693b25691092fa1ed0827addb58779865b2b26a68400434a186a3227e8770c9607d9aa7f6eb0e94aac247006a52b400b0a4200bbccae6b3de57e09522fa75425af9994f5b1fbdb776eea16b26865ecd591c57bccc6e", @generic="f86c0b7669bc4867d98340c0e21471e98c427fa52edc6c8e704d038ced6d119f31a5de74ae52ccf67c8bf3fb728ddfbb6d3050952a94476fbeb7fb637fa01cce6422fb5c28345c6152f03b7010a37ca5a5", @generic="e6000ba8ead345be98bb", 
@generic="6928fdfdb69bdadeed3ec49abe17d3a3db8db783727f44d71b5652ef3fd50819cb4c9f6507d51685812d79c035d40b7b4761f6b716b088823f45c70d004df3dd5aa352b1401a924ced3d93c1222b14bce60385f7516dc64e6fa4531560bff169e44af3d46ac5dbafac7191f4d91a3cf10983b24ea4e67efbc9a6a260f10b646c76eafab3e217330d70afd867dca552538ed2fd0c8af1f46fad71642646caf32118c7505bb9fa63e05ab5db46466358cf633e2f4ed642b657c53607ff7ac9ba7f80743363eb9c9c26eb86823e4b0990732d66c88d990f42c11eecef41394eab17b97e2da31dc1781213f34fa7bec4b1173aa955bad9c6fb8104f7901ffc1418a254719056d256fdb680371995e785a0d7a45dcd46676ab35bca3a6a2a547ec5daa0c9791e0c80fcea59996266a2f90abbed4ef892040a28d698103978ecde3dc3d3c2ab479559a7b76f4de546ac97a50ed9e0ed955bd44eac8020d9c1f77657357a43ac6b1e55b045d624dcb419280a330f649cde63024400ab48735842edd97f9c6d498a7a9dc52cd1b6d4a57923fd10aefb1c454e7b309771c5453b9036b03eed25f254d94084a7c63f7b94c734d21bcef41fc8f31fac154ac2f5e07ae7eafcf1e2c6a01736c050febb221594258c027febc6e7649b40bc31a85068f22961700950613dd0f409dc11790cacf8af3d4c83e0048b7176ded0cfdf625a150fbd1e23b0121202409f7ec2c4daa591e5a15a21f9a06cc261c64bb90c2587a787b645b6190c1f125ee73f4ec6acf70a0fecf52b35f30e667c3eb4e5bd190382705d2db749762def655c19110821ddf59283fee5dda9ab523e447bfd2ee6a14c09a80f37d6dea43135b33bea44ba98d8cb1a32c232c61feb632dcc9b29131c8a2141d680aecaa4b47987c52e2408a82c4e6920e19225603b0220047199c3d5f36d0157b400ac5c60e0c81dd404876766a4f39742721c44c2a08ef44b1a54db54b99db766c2c88a8dcece5986dbb7cffab7d8f380a4306a8795328af7a11c1321f89103ec7a9fa9fe97a8b16b09102d0d457cf043065deb2ec464b64d769f5377ecd15614d43bbe86d4563885aa26a8be6ddf215c52ffe954b99865b4c9c3546e4c6422ce545c8b085e87b2d23b4b709137fae4ac7ea6a52f551f052b3e07e646b627a57359d910cfde8e51a83565d7c7348898d60d0450f3eae76011a2281df20d34d87076347c0150f4c4b317c4ce26ede14c6c041d460ee4907e75556f0e169da6bafc3b50d29fe84e8666fd8fb51595dd2f257841ceaedd5020d6689df58769123ce38cd9bab9e2ba8f5157e0706d997cda38a1950831ae8d493eaec57fddbed96ddaeede3a9a73e5651828cf6e035fedad1b03a12b8cd1cf7aa1fa33f9bcd4367461c92fab084cc20440cc4696371f0940fe08c0ed242f2f1977b145a063be846915365b362bb36c2af6c80b67874a07a04a349c2edd33eaa1fb7b8ba08fa86ad10323f3162015099720d7df577afb4ee40e953bc026491bb95b23a43e9af2a803350ebb769a3831317f38ea00968303ab8678e44a87aeae5e310a907ae78552ab49e81f07919269a924eb47b903946855d110e2f6ddc89baed1d99e6b55dd33986579242fa4f623df45224c7a1e0293d1f0f04ab64fde9c34447f01122673c294789ca530340bf66129f74693870a67f7198c382cf8fd5eb7be37f82d7915a8b3b11645c73ee4e9788ef8479e4af8f19ca0a10eca9ec73f808e2b45598f45356d5be5a755a51497fe9faaa2604825f4180686296aad2851339e04a13a043877bd40ab6520ceed4d031efe18ac445755c6dc317a1dd7baa7b7444ba4602124016e6213be29fdac8c707cb34a8657679cea75348d7aab52a001383dea00500cfe3b5fad3e3a58cffa87c26b50b28316e2eb0a92f9e93bfeef7ea81f8b62ec67d9177fc1376b1edee7340957b34ee8c6a6affe48d274c029f71d73e83fb60568b636aa2e9ceb55657b6dea1c492fa350c1df160c1e5385c3fc4f1d4d08923145a8072bc9011995de304569a957c946ae0c98a27dec981a1b6b0704511539a0bbe3d76e4e093a7dedc111ce82061f1a5b6215b7ce5c54acce635fcdfe065d3bf0b2eed7f174aeb2ec4ea67e1de15bef6d745ae69db45532f5346d663d9f2aca5a61d7a4e041e34d68b8a38b7e740ed38013af5ed6b396ce5c378bb2e47df66b1ec530b5bb6c1590bc1fbfe013b9a8bf0ed21c71033f4afbcc272e6bb0ac065a8edcfdb4a4df96c76d56e0cd7ed32120594cfc431f9b636987e87138070bb4700880f82165c8259da728e547b48612acf407244b1cef159e097fe2af94f843f75297aad2f15ff289cac018f5523fbab1544dc5cce6efc335711ed6908e5ae3224b53f6edd282b88cb6f57b88084d9e1634f12e7f0f1488c8443c6250fa233e4e995a5681c1847e2b84c590f24b0b19cd0911b4e85144e602189c92ea6dee5bfc4df3c88edbfe004ad0ada8811a79246
0cfc2a2de21a8c38c4f2093257f2f07ceb101edf68ccde86128575e65acd0263432527b64dd0c3ae0ab2076db84abee247c33c4c6a49df17ab8d5c583f982c8ee5dba65d1c3850addd03dadeb256d48edf57c2e021858e45f92cac6d90e11f52b4ea6e384de0e8dbe18a1f4e56fb9d85a97168ca8a83f6a51b78b077051b4500bc6ce5c299c839fe0445f6c31ec14a837ad6269380945ecc12b7afc95f91a50ab933d0dde606c0a4429cbf855df538fc6b08fac720a20489230f8e042e140defa04375e89e7bcbdb67c359d9a136a3a06f04b70f0b77b3366af7845ba03480b05f548589e533545a722a2387932021181843fbce3853d9a4b7e8686af349c91abbe2d0ad4ad2f83c44620cbf589adaa6a09d6ed6cdf3b3da1bd958a3c7d4975b6db4aa46ae324e804cd305baf7be571e2180ac0c96d178a28a2bdeff9e1a3c64a8791eb9322f84ac0de17a2c48a57beb9a04b9256457138f72b76bccbcc805cb5615062cbb76e8bd9940031037838db4f8d264bc3c5c15bf577bb2d735a7e8d586d4775625b3f0ec688423eecee471f6ed0f6841896b2145cff033f80ea94d4b709cad36d775441b9e95ae5ad420075de8791f1b2bdf633789b0a9877290173e912add3cb0e6e8ad07ba231e84d95b1e77afd58c3510c5964df627d512126172f0d15968cdfcc193ce69128d9b247405e9d9f1a4afd45bf0f683a978bb8ecb365ce490ba2d248720b6a5417150a18bedcffb51bdc5665bb3abac2a8d34228c3de5b3855fd80680482dd1fe58460bc970e1031cc65ee614f6cca5d23a7bb94b37af4ccf2110e541aa2c435a85ce73ca0143d4f20c69223431a6e8699bd3dafb88d7b4329b4ff08fe8817be9c3cfad44f0eb74f47ee999fcfb431f266ded49f6d0da1088a28d000a20715ed96b11e49a2098c310d19004dbac69b4ec6376fb7357c2836c7489ea08fe499adef64ed3281c93337cdecc55cb4d484b033f3be372cc3856a16cfc04ab54c1a90ef0831015b0bff9e557c0a78bf415b8312829c9da177b6d727f546f1ed9ec44b936966ca5ec592774da0d2cc8e44b6e22722f403f2d39bab7d7bb6088ae9da619076de48b264f47bbc16c31adba2044a96f955e912c3135d3e0399141c376f575dbc3e7d2eb32acc280d78b3cfb3fa23d9e9cd6cc18d5769d0209b3409720e03d655a9cc1537bab0af784ea8a098294be5ddf878e75f424723cdf615e78a3a397c236ae71ac16369b009f03daabb1da2fb1985708b2a595bb92a1381a49ce48ca2e1c34582851761d2f83f5a2844c405e77754e593299b77c530124a0e98de3e12518f2d70d6c7bb8d0dbad697bb5b5578bbe54ae8e3199603845a513648d4b5b4ac2b9a86baead02edb911c3f2c0847034a86be5ac8527400b1bb183f47849a6f01cabc0c716d3aefbda75b4deda103cc7dfd3a81678f65e4c5f51385779e9b906a2269e279658d5d3b71ea8126f473d8ba785231c411c846cb844dea57443533e48f06d08cc06c219010f379fb99eca97fe255ac3cc9c05908d991efb54523fc8c300eca5f2c72b3cd655b4371ada68e8f425d32e480c55282c1c1b30aed4160eff5b755e6145d247a33b250a14ce30dbe4b2dc675fab9673072f4bf87db02275bd51d7f9c5c25c1a88c63ee6ffa07335d7577c076510a73e04c59fded7a6d61cfb4f5f9d079bb3e575355657c1e77fb4f0047bbeb1af16f8c15b95faf7af79173903459a03260859c242a05673a3a93884b99a828c275f9ffdc3324551a9ff8229329c7e6fa5f8d01ff75dfdb954bd1deb8003834a735838d5e363fe79a9168b9b07f04b59f5d52aa21a9e80927d18f4ea14f2d67e47b5d1e8527379bb7e47b09bcd45e316367445043448f46da57b8d082307e8664e7616882fd913d472d551049411a3eb316612cd467d6a5986c45f19022e2660aaaed6ad6f0451dd33e1d6dd8f4c1275f3ddc159d9c63b754843ab6d9087a68445361bd8e50eabff9a99f4b3c7ae1f04222c7dfe441a7baaab5ecf5c897aad6052c455ddc96d5fd72d15ddd2a8bae7a270183c188ecf227385f4a5a479b543b189c88346d3be09fb69b9a65371a091f55cafb5fc9bf747c51751117e31c5fb2fcec8b991ec9c7eaf4dc1c57cece1ec089956d385a0478e95a01164d082653625dbf8fa04b0d0ebd1aa89c610c28d700f034423d64f5f29a3d134d4d0498631d4ba2e1acfb57f1bcbfdb7dbae9280918381357ee3bc5533fe2ba2e01c8169db0a56319ff69838b13e04958838c8ea2ebc153a509c3d0ea334014aa534acaa9aaa498dc564059df754128d6685b729c47f467e55f5557163991abf895c00ab9d3efa608616388f3a8ed8868c9bda9943bd565e471b0074c84aca32882a1e8b7aab09c2aa071d3f7ecdda7421c0393f6d748b9faabb91f0883a301619f3a5d6f2f40798444833bf7df68994f40119d8269993c16bce6bddd2c83c459e693fd9003d9bcfb6d548847507f78d1e63463b8bcd7
bd9418fd0c38c1f1283a91ce8e4470cbfc75ba9258f1f256bcd0ca4201f6cc1e9b17e3f5b08a2d3416f79ca8c714f1f25fc158f7c3a56c92cf87dd825d38967ed13f06d700bf2cf75264c9549150cc8d321233215f1b6ddedbddc12a8ffd971033641875485f215eb2281083c4c3492144e0034002b785c4a9cd5ad34e7f4807f642fdff28630746d07778b889da80c251866b868cf3cd1e0cb5b4691e3b1c3b34f74ca5f2ceceffa658eaf93b2ff92337cd666e414493b102b471c3bb276ac112c9ff0848798f582399d23fe5f901813523d09cfe3ddbc1c06f3a94fc84cf6c1c8f21e0e6f34c63a35028488c06c7bdfaf9a50cd07cd7d62210966075edd326a09cdfaffb43f2056b6d997ede39bee81d1b71aed20aab934f05651c7c086b724ebefda7847f2a51a1171866963c0e68a7d0841ed65041c8cf7b2eb7b1e769bcedbf8a186c3ff7e1a3e66af9e20f34820a6a5dcf9eeab30814a932987252da61fb6a6d379948756ff528e5e87df25a57a0d3b026830679c9fb1334395ac5b37e648245f8329df59811f7d2f2366ccbebe9a33d55f1e522e2c48b60a0c1a113941a3fc130d9ccd5884c0d7ecb07cee180543360d03106af185729e4799b27f14cc8409136587485a75b55fe3c6c66cc029d690959e8bdbab82f8bf444db2ced6967957928b338ce87ba562f444ed07475747ef3a9d48ee1d4cc6c0dd2ddc8db2ca16baaac5855297bffa4b17f171f9e6f51410eb3a7e8209a4788712c74b68277e3c8", @generic="56aeacf4ef40c28cab48e5f69241399e63e8c3a1e129b5e9ffd64b7f63b641fc03d30522b50f2b47b5c026b75b141360bcbf70b38a1acdb29ee7f21722676a085fdcdb43a578c0dce21dd7d6b6fdc5298b9f3240c06ae7df4a11f66e83c961b1b03b8fc197bcfc4d4cb7133fbc79089e9e2fc420ef7a2f2bcceb9c2aef7b3e141ddd75468edff2f18e2e45a64d60204b47abb1660e380ec89edf15e56ca9a92f5365624068ada93ce548352dcb8193eedd382f75b528"]}, @nested={0x1148, 0x92, 0x0, 0x1, [@generic="a8437d1e5cef3fefd21b051b8056da3d6fc04e806b0ba86cc023e678a38d882cb7664802d6eb12fa08d9ebf1ec062b", @typed={0x8, 0x57, 0x0, 0x0, @pid=0xffffffffffffffff}, @generic="7537b89bb364d3ce91176e1b2e833c475c1865751773282debc78b13c92ab5bcb0bcd044246f87bed3d9f515b81684181adc61c2970a0f5652c22bde4c08f9077087040e88d13a74b3f32ce0a8c2f5c948546c96d45d814553dd404959270124eeb2b8c6f3a29fb1f2780a0bf04e6ee3b708a5d3cad5e5489580d3aa8be85c3f604ff50788bf1928c826f11e79265145cbef2aaf1485fa69c1b7a1d94cf6b60123f57fe896932cafa91d607574a1076fef64adb872c4b331e3764a2647a85fba86a1576782f05a17dfd2c9169048051e31f00a7eb003f5d2f109a2e8bb3ac71d62eb5fdccd9055ed28bbe5b111300270f322b4538c3f3260779461802eadd5d373857f1942a93213e7ef70cf275ea38314de9f4d14dc170b9c18d1c44b73d10dbbd18ce368c148b70ebf93f8219b077e97dd5e3beed1c7dce74ed99ac7372ea4b68a2da2d04ab1f777ec09b34f797caf89e86acde2188791225fe71b206a4fa0112e4bffd70dfce52d177f22720f1000fa39321b7d349ea284f79abf1b1b9623a4ddc000e7dd35a20705335dc5589bfb265366186ff2c87905a369969739033462a2c6e0addaaf8608f4f7614434d60012196403b2f744ddeb39def83171ef532603a46a317b625261a89639a077d42ba523cc46e2c00577f4541857f5833a934d769757dfd14e3d7ea370b121c90365fc77d1cc8576c0a8528afb4e5eba6ce8b828507fd5202d0f99a60917a97c698a4136ca1a8274662117721f1813a47c007253ce72681af898617756408499f43d18643e4d064f3aeb4596eb4a5f699ede6aaad86615dac2b3e3127e4a0334deb08567c37eec2777e839bb1f237ba59ff2e741831e32b5e619a8917b169e5db1abb977a46372003ae7adb92a494c28cbc1bdca7e568b05f3727c7a0d52d69f34b00005a0570bf733886432e37f65be600b822a61eeeaaa677b6c318412598c024c89ce389a3e2e1f0263483d51884558c4b3cae057a552a83655f545ce479f54a75ccbd66e3f17c3f8d234522122fdb6357b9b2b5406fbe9cd9de572917e47c806837726abddd93a10f41f7cbda8d7ca275b7b606760b22669c5b3ff70f066295c4c66e1c34388511186e3cd51bf0836f02b989d4c0adc8b45f3e089b06119c3aef17cd17e755eeceaa20460117b4cb6562978400bde5c3c63a62a48fd7a62d991001efd64a5473b3a474493acf8d8085136048105281f3b5fd8a98b47811f28b3d1868e97fb4ac083327a56c68a706b6d7d455a0d24bbb749a740b24be554a7629e1b066c2752e2dac05515d58a0443eb3b2ea871a2bdefab756
ee632f9e26de9f3edf84406723b8adb544da57ca82d147ad4ce10a83a144d19e36bb060984552fde5e65e8c44f52d2ef0b560eaddc3f9851cccbe4b75bacbfd0691177abdf8dc89e1b49a47ce74ef70077c0acb827c2ba94957d3858c5f4204ec80c0f17ed7dc41c07f125fa48f11a3a4ac31e1dfba505ff1d0133ea6dbb2b91d1c6dc99434dc13f8df7f0d10547f990c387f339cac112488d4b150c49b6b8d13b581da7fb1b2cefa45c6829e048feda278a7b7e74a92ba519dd284d357d4e0515c3d2861634a87001f03798f4052d2f7c29eecfe209b069ff9f7b2f3e222fb88bbda5f7fad83d1bb93615c5fa7fce0938b12b58332110d77821b47ea06c5ed3ea0180968b5c952feac95991aa62866949262d898d4552b770b2af9ed8be41b576beb032e53e68c1f374111e322c107442cb7d92b59031d7b3c71637fb671a83c428bff1f8ec3ff2573d8b57ffd1ffee186f7d6dbc97b07eacf9e45ae4ae120f519d596cece05fae2e49502a58fc38b423d09054a428ba72117246407dbe407e872f3d13586161c365f0f2a029eaee70c3f3f6b7fc9af783f4ce67af6ddf1650c9eb7c6fca22f884b34b28a8ad5908a0ad05d58a5a60d2e2e62a578a908ca42ed12690ee14397f89b14e15bd6e4deb8ada31c455e528ee2254d2ae005d434335ba724b7afac1118d81e6e569fde85265b9a362615d5d7fb12c771bd98c24e27299006748f14332f0adb286382244e0a81ea99f112baebced304d5b4d4797ed2f1dc1482004ba6dbba0e0674ba71be5e711df55c6e9cd02be62c5cfcc82dc902b990110c808ef81f23e863c4455a76c7bd11cd90420ac9f1a7971695a42a728de5137fd944c4bd69ff2fd611803926356d9a967dc3e349d9c8f38dd016f2a8a6f6f29ea14b9d3f708d7fa469593509d4bc485dd0942f0b32e9f4ab85e5ce9cb47923b9fc635c52912cd994b5efe54ef9e1166a3942274b79e01eba3eeacdd97c0acbc2db3082e7cdad40c14907bbab6eef1ef448c831a389c8ae538837899efbd41b6cb07a3a6687c2a45b3a34856e416cdadbc064c1d19aec896e33f10a6c98dcdc75b5d8c8a4d241d84a10c227b0f1a0d148edbf21ce84cddd60fbba5605a066be13fb545c41ecedd77e64f5274c769590c812a29c5245d4b1c0fa57dd6c471a4c7d740af16d8cbf4888bca4c50e5c8ecb45943e919b6fa13f304dca0b48b568d61b35450c5dc89e3d30303ccc91072048ec17ae99f034742a29c49c64547da0599dc4d1a96f6811090471f3d8f9edd2aecbcafb4ebabbacfee3231fcbdd461748477c5e582e31a6c1a92a8304cd7d3a70fa94c6df633d60cb1e43ffd0064412b6ac380747212d5e762160cdae581bba13d00c1ebc2909e5d2a5c924ecc294e0e2f02acfde4ed9e8d8d95526e570aa3a1486ec7a2ab3a5d3a81f521826ea7ae023a1d76394491cce9e14f9088294766939351a5ce6f7d234e651f4198c6a9d0309df2931571a6999ae3936e0990eb2448204714de33f663917b783392a5ba81eb94878bfb4b42b63430760d8dc03c8e15073934ae1ed80543005217ef2ec37ed9440d4a871c0a64bcaa2b2721915c9a001c82def30da31cc74f204a9bb9f83e334caed635475aa437b9ee3355bc3c69a96b60b60a1d555946d06cd9ebddbb37de3ba4b59f46edea410c2297e96929b57e4d88a862749d6c9bc574e55f692b291b94a3e598349636c37dc9f7c250674dea833701c5121e920eb47375276544f0e58b51caa5d2cb789893bfefe2ec07fdc3eb28459db1037cd935ebe23c39aeacef410ea48cdbbdf7f675d0f37671fcb914a5de3feaf0e2a3b89ddffc7e1fe9f412c15669c43b3a513f62f58afc5fa59bcf752a75bd4e0e318ec3670bf7f94e3ceb0327eacb36a010b9451ec08a76bc783f97aad39904ea0b5d276f96a4575dfa456064c5beeb32e599f41c8492ee38d083581fff677f68d84abc55d0977ba7c87b3fc1c8a311cc7084ad9e82bf5313413ac476d89b59a495c0476f6c1a2a2de13180ac1fdce2e949e1d2aaa93ac1a697fc278f8dcf464101dd45e8d35c64bfb71721a1d96e3efe813f9ec931ae6fdb2fa7dd1f9d749b7bd8e95fb7904d1a3a6ce7b6d04198bb933f3255d3a104c6327fdd922b16b6419d9b56cad5ac9db74d8c824ba10dd870db6f877bee1c3f041d45bc889d7caac0b1cab9daa5de03eab31823e83642b20dd6ca05ce9aaad1ffdb8b212393322c64d9978b8bf54447259db3cac222286a9adc8bcf1415d563029cff8549cada95f593ea92be66a12f3224beb24d0dd4183ac1d3fcacc7f7bfebbdf19da71633253c1e814aa935a54b3cce796e75eb35611e08f3cbee2a6f6182afefe8c254900c99856dfc0ce1212869e4fed5c2406213de1fe8d64644fd2c46a832f86ab55a2ea4e83c4a43ea89c80e407fdbf97206a9d9bafdac087d0a74a8fecfc3aa00a4e7114bc5c06fe954eba69e1e6c56a344e4cf4fa392c7931d4c8973e3c84
87cea6eee14ea1f9f2fbf7eb00e00bb7257ba6d943bdea78962bf15c77cbbda0ae4138275dd67350c513d755a3d28ea507e25a130f7853883d2783e6b38adab6ae9fb64d09fe58e7943c21026e0b11682fe3914f7dd623b3b7be8205f23619ee317bfe97e17b5c1ab06521f01af8c3093fe47a653fd8a6073a1ed131beb3964808ce8c50ade7a61cc993607a9dfd0c2e400da6e6f8d4de440243204e0d88dfe98fba8737f4f97c732ea2258bfcf8af310bd2fd6bcaa4eb31acd7ba7e4e2149670a5e966fde96d1045ece9448a40fd935d307445619a822066c87d7800dec3c7fbe8e97612a205b9f1e914862c717a0f9572a3c6a381d0636ee2a8b80ee7d8e2d32f4eae89650feaf233bd476a6aa564745849cd5827f6ccba385a812557fa383b001700aea61924906cbf23c57e83ed7be4525280f71d05fe3e0fad5079e505f9fdf2c11d19e0e4e0241f8268d810547bbc72b6f80fd23b522a489842a6ce5341a9a48ab293343d00e809af6ddba5ad3ff0e020b4edaf21afc735662c575c14ce009918681eb7651641a5d7d6341f219ad34ee69bc4d4490c777b35e335010456262d4f37f708c9e9847b579294935010b156435ffb1eabdd085d24e5af44d4bdfb43257a974cfd29bbecaddca3a4519454ce1438c1e609d83d9cc5028bdea4a98055a053f2d52b561c03ba468a4d49b3215a078f167de3e4515a97260c6a3c54357f3d4c118ddd54801b029d0dcdab3345332cbe624b462fef6ff3ffad531019de86eeb584723aa7340058c19533e3ba41b9f0cbac421191aad1682295423dfe20cb63ae46c78cef5329a63b7ee63bb94ed2b200c0aca8880d6098f64c9068d1661b60cbe22f616b11a90896435230aafc0748b6813abd9fcffdd11c29a5a0815383660e2c12ab72ed3c8fa34531bc82f3a1ca0fed474b378e70110073df7a0b96382f8671535d8cde879226a44b93f29f3b252caa224435d1a4ca7af4338677c09ac45b05ccb9b23f175ec471efe8c305a19722f507b09f16df0899161834ee8d29c8f2a333b2931e4546a4e34ec2a583fec53da1e7254f8810e492c478d6455ba4c95716dda0ce06ba0a66b0ff7ae9840c3be78ac498aac0380ff4b1943e411cac31062b7281231923ea44ed30b0409e13f88937a4e8f324bc0137956d2fcc93b7dc92c15643fab620fdd99c94d7a0a1e77ef57eba266f880b388e1451c95af7fd6c2710bb9eec3544db03aba114b11b237398c2aed999c5a2ce60808b9d40c20ca5eecfb5cb1a4ee3c35fa0172194e3a6046d2b2200e9728a9d09885b3baa75c4ced3e0c2376f5700d8e920d85b268e4d303f58bcb577c566ca48e4f79d3da719edb850d7333c4f7102961091737757f15fa1fa376c70e7a533a62086bde95c1e09b5ea6a1df244eeb53ca837a3082d73b59dd6ec402a1e44287bb5e794f8d2d336cc467bd0a9baefa38936249987ab9f04c465c2baa91b2b9fb9bb85bd663ca976c32487ee44876e04b3990338930f30d502fc255acecfee26662381ea40a64e77eb6e630af506eef28b74df105116ab46e310a54aa5455d570a33d8e90d1fdf3380735c0eb68a9aa8464bbf850e9e14abce7256eb8e4c29347856cbcae34809e8f81afd561d9a2e6453947435defab2c85220d15775fba7065891de05526057445b24a801540328c61bffb4048a8d37c59f80994a34dff007258989a9b141ef09225eca17d710de0d1d9b15928944efc07ee30e66f32a209d3c4bc122c70acdff7bc4a5ba59bd774888e7dd09a18b262de70d25919c7255ff291c123349405d0a36e124791eb1a06accc749ea2423f7ffd0fe8aa1a24e856aa7bccc9c5a226ffa0a812f84f9c079f001999448b307d3dc7bdad516f52390072acbcdf1c5bcc8238846aec86904068309b4f6377e4f6b7c19c283a29305eb81c7044b146c05eff25dc", @typed={0x4, 0xb}, @typed={0xa, 0x22, 0x0, 0x0, @str='wlan1\x00'}, @generic="9f4a2367e21b9170c6907308320acf3f08ac9ab55f9b0b430d986d75599b874dc0d96f980a2ed797ea749df5cb3155b17996f4ba6da7dc91", @typed={0x4, 0x2b}, @typed={0x8, 0x26, 0x0, 0x0, @ipv4=@private=0xa010100}, @typed={0x8, 0x59, 0x0, 0x0, @uid=0xee01}, @generic="a9dc58f2cd45a80d0d413878f6659fa1eb94494f8ad5b3f10016748c186808c62e396ac3e858be94fc45487813e11a040b821b6f123511b8b492033f45b7211f441fda06c9f04101098477d94d11189b6d5e8d76be3d8ccaf526ed62f397255fa7486c8d6bd3c8238f149ef1a417627b285932d6d82b974c7fb9dd2b00b0aa458207b6a9e6cff097f8da7250f26901eeb4633508e3d033e386d132d0fd28fd216fcabc249d2d89c91b3a357b2a77f2be18"]}]}, 0x22d4}, {&(0x7f0000000140)={0x28, 0x39, 0x200, 0x70bd26, 0x25dfdbfd, "", [@nested={0x10, 0xf, 0x0, 0x1, 
[@typed={0xa, 0x46, 0x0, 0x0, @str='wlan1\x00'}]}, @typed={0x8, 0x84, 0x0, 0x0, @ipv4=@broadcast}]}, 0x28}], 0x2, &(0x7f0000000300)=[@rights={{0x14, 0x1, 0x1, [r5]}}, @rights={{0x18, 0x1, 0x1, [r6, r1]}}, @cred={{0x1c, 0x1, 0x2, {r7, 0xee01, 0xee01}}}, @cred={{0x1c, 0x1, 0x2, {0xffffffffffffffff, 0x0, r8}}}, @rights={{0x14, 0x1, 0x1, [r1]}}, @rights={{0x14, 0x1, 0x1, [r1]}}, @rights={{0x18, 0x1, 0x1, [r9, r10]}}], 0xb8, 0x800}, 0x0) (async) sendmsg$nl_route(r0, &(0x7f0000000100)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x40000000}, 0xc, &(0x7f00000000c0)={&(0x7f0000000040)=@newlinkprop={0x44, 0x6c, 0x200, 0x70bd2d, 0x25dfdbfe, {0x0, 0x0, 0x0, 0x0, 0x40, 0x30400}, [@IFLA_TARGET_NETNSID={0x8}, @IFLA_NET_NS_PID={0x8, 0x13, r4}, @IFLA_BROADCAST={0xa, 0x2, @multicast}, @IFLA_NET_NS_PID={0x8}]}, 0x44}, 0x1, 0x0, 0x0, 0x4000085}, 0x20040010) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) (async) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) (async) r11 = socket$netlink(0x10, 0x3, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r11, 0x0, 0x0) 10:42:11 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='blkio.bfq.sectors\x00', 0x275a, 0x0) write$binfmt_script(r1, &(0x7f00000000c0)=ANY=[], 0x208e24b) (async) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x200000b, 0x28011, r1, 0x0) (async) r2 = socket$alg(0x26, 0x5, 0x0) bind$alg(r2, &(0x7f0000000000)={0x26, 'aead\x00', 0x0, 0x0, 'aegis128-generic\x00'}, 0x58) (async) r3 = accept4(r2, 0x0, 0x0, 0x0) setsockopt$ALG_SET_KEY(r2, 0x117, 0x1, &(0x7f0000000200)="ad00"/16, 0x10) (async) recvmmsg(r3, &(0x7f0000002440), 0x3ffffffffffff67, 0x0, 0x0) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2195.591327][ T8340] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 10:42:11 executing program 2: pipe(&(0x7f0000000140)={0xffffffffffffffff}) close(r0) ioctl$sock_SIOCGIFVLAN_GET_VLAN_INGRESS_PRIORITY_CMD(r0, 0x8982, &(0x7f0000000000)) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r1, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) pipe(&(0x7f0000000140)) (async) close(r0) (async) ioctl$sock_SIOCGIFVLAN_GET_VLAN_INGRESS_PRIORITY_CMD(r0, 0x8982, &(0x7f0000000000)) (async) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r1, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) [ 2195.610652][ T8340] workqueue: Failed to create a rescuer kthread for wq "bond856": -EINTR [ 2195.775161][ T8377] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2195.795156][ T8364] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
10:42:11 executing program 5: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) writev(r0, &(0x7f0000000340)=[{&(0x7f0000000280)="043caa07f3a3f76d858ace87d769ded7550278376e8c4f6550d9108cbf5189deea33e10b8a7224c6107d53d01f904f58c8e83e60c4192c5a7fbd1a5f9c9fb63ca8dddee8d195355273926702d073752d80d8545e667b5c485a", 0x59}, {&(0x7f0000000100)="e48c29435b087869336f3e74363df6ea882508c8aa487ff564e800ccd724f50e3896c5d551b1a2ec6bb29187299fe2e06c11dfcb2075f9a839b34a4a6b4c2bec", 0x40}, {&(0x7f0000000400)="a318ece45becd6e47ff8375b816640bce6a9ff4d8e83ab791404d351c3ba957f2f3a0220176c17e899248cf03c8a908c4ade8de15d81e79b803df97f6fe5e817c2efde3befe034a7bc6d52a0c1caf88dc7d2077e40879876ce4aa8d6273615939a97526b8cb409d0c58f8075db16b3aed80bd44efb660be17d9db07a801a43b59a034e9998a5d60b072ab0fe5dd97a86e25568ab6d9d74c7af31171f38eaff0cb0fc24300b48212ec6608ef820fc964a28db4cd8d0526d550efac04bc60f4e4cd3c224cbe0ba45f28e0566930afbca59424e39854793c6e301fdc9cedeb00500db4af5f6c2004568aa4682fc656b1943bb63", 0xf2}, {&(0x7f0000000500)="a1a8c7128cf53b860b5aaa1ccee4ffe846420e0192cfed6fed4eeddf537179b0a83627c3868f3b78b60a61263a90957f69337b2ab0a123218ed0318185135c095b602df299adfc458eef4947ab34782ecdbae8436486efca9d9076f2316af18200a3014efba92706cc1748f7c5925bdaa3aa4516b6f1a6f22f4f1c23d2554620b90ad856298d3087e4519d78d14dd2d1dbb2401cfd330b19ac2fc26b2b4837023a585b846cdc0e007773719a6ad0cf3acb3cde81220e1909741522d75d5c49222f1d0ef6d8bd1b58070df04e658553cb734adbdb98c8482330ed25a1d53b5b3a6c09aa436715f3535c84a943", 0xec}], 0x4) accept(0xffffffffffffffff, 0x0, &(0x7f0000000080)) socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x0, 0x803, 0x0) syz_genetlink_get_family_id$mptcp(&(0x7f0000000000), r1) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r3, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r5, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0xfeff0000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x3c}}, 0x0) 10:42:12 executing program 2: ioctl$F2FS_IOC_MOVE_RANGE(0xffffffffffffffff, 0xc020f509, &(0x7f0000000040)={0xffffffffffffffff, 0x1f, 0x6a, 0x200}) r1 = openat$cgroup(r0, &(0x7f0000000100)='syz1\x00', 0x200002, 0x0) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r2, 0x0, 0x8000000000004) openat$cgroup_ro(r2, &(0x7f00000000c0)='hugetlb.2MB.rsvd.usage_in_bytes\x00', 0x0, 0x0) r3 = openat$cgroup_ro(r1, &(0x7f0000000080)='blkio.bfq.io_queued\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r3, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 10:42:12 executing program 2: ioctl$F2FS_IOC_MOVE_RANGE(0xffffffffffffffff, 0xc020f509, &(0x7f0000000040)={0xffffffffffffffff, 0x1f, 0x6a, 0x200}) 
r1 = openat$cgroup(r0, &(0x7f0000000100)='syz1\x00', 0x200002, 0x0) (async) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r2, 0x0, 0x8000000000004) (async) openat$cgroup_ro(r2, &(0x7f00000000c0)='hugetlb.2MB.rsvd.usage_in_bytes\x00', 0x0, 0x0) (async) r3 = openat$cgroup_ro(r1, &(0x7f0000000080)='blkio.bfq.io_queued\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r3, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2195.967251][ T8364] 8021q: adding VLAN 0 to HW filter on device bond1416 10:42:12 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) r0 = syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r1 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) r2 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r3 = openat$cgroup_ro(r2, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r3, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) ioctl$FS_IOC_RESVSP(r3, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000400)=ANY=[@ANYRESDEC=r1, @ANYRES32=r3, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100", @ANYRESHEX=r0, @ANYRESOCT=r2, @ANYBLOB="ce2acbf1ca6c99376211828c78967ac130505a43562d7d78ac6b76aae175d68369a78b1b039f41815895b87ae46b2fe7befcc3109b1b6103f56edc0809cd5b7d0bea5487a0c5598c62d8a03803cc54986ad9dd3cccbe1a50400c1fd6ec5f96cc621b6e2460733b5bbeb108552f2338b08371e837de6e56379404a4b033f03cffe15a5ef3eeec04a14fa0de"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0xa, 0x80000001, 0xfffff000}, 0x10}, 0x80) write$tun(r3, 0x0, 0x0) sendmsg$nl_route_sched(r3, &(0x7f0000000100)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x4000}, 0xc, &(0x7f00000000c0)={&(0x7f0000000040)=@getchain={0x4c, 0x66, 0x20, 0x70bd27, 0x25dfdbfc, {0x0, 0x0, 0x0, 0x0, {0xfff2, 0x10}, {0x5, 0x6}, {0x8, 0xffe8}}, [{0x8, 0xb, 0x80}, {0x8, 0xb, 0x6}, {0x8, 0xb, 0x6}, {0x8, 0xb, 0xf4}, {0x8, 0xb, 0xfff}]}, 0x4c}, 0x1, 0x0, 0x0, 0x8080}, 0x80) sendmsg$nl_route(r1, 0x0, 0x0) 10:42:12 executing program 2: ioctl$F2FS_IOC_MOVE_RANGE(0xffffffffffffffff, 0xc020f509, &(0x7f0000000040)={0xffffffffffffffff, 0x1f, 0x6a, 0x200}) r1 = openat$cgroup(r0, &(0x7f0000000100)='syz1\x00', 0x200002, 0x0) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r2, 0x0, 0x8000000000004) openat$cgroup_ro(r2, &(0x7f00000000c0)='hugetlb.2MB.rsvd.usage_in_bytes\x00', 0x0, 0x0) r3 = openat$cgroup_ro(r1, &(0x7f0000000080)='blkio.bfq.io_queued\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r3, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) ioctl$F2FS_IOC_MOVE_RANGE(0xffffffffffffffff, 0xc020f509, &(0x7f0000000040)={0xffffffffffffffff, 0x1f, 0x6a, 0x200}) (async) openat$cgroup(r0, &(0x7f0000000100)='syz1\x00', 0x200002, 0x0) (async) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) sendfile(0xffffffffffffffff, r2, 0x0, 0x8000000000004) (async) 
openat$cgroup_ro(r2, &(0x7f00000000c0)='hugetlb.2MB.rsvd.usage_in_bytes\x00', 0x0, 0x0) (async) openat$cgroup_ro(r1, &(0x7f0000000080)='blkio.bfq.io_queued\x00', 0x275a, 0x0) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r3, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) 10:42:12 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f00000001c0)='blkio.bfq.sectors\x00', 0x275a, 0x0) write$binfmt_script(r1, &(0x7f00000000c0)=ANY=[], 0x208e24b) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x200000b, 0x28011, r1, 0x0) socket$alg(0x26, 0x5, 0x0) (async) r2 = socket$alg(0x26, 0x5, 0x0) bind$alg(r2, &(0x7f0000000000)={0x26, 'aead\x00', 0x0, 0x0, 'aegis128-generic\x00'}, 0x58) r3 = accept4(r2, 0x0, 0x0, 0x0) setsockopt$ALG_SET_KEY(r2, 0x117, 0x1, &(0x7f0000000200)="ad00"/16, 0x10) (async) setsockopt$ALG_SET_KEY(r2, 0x117, 0x1, &(0x7f0000000200)="ad00"/16, 0x10) recvmmsg(r3, &(0x7f0000002440), 0x3ffffffffffff67, 0x0, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2196.315741][ T8366] bond1416: (slave bridge1279): making interface the new active one [ 2196.367233][ T8366] bond1416: (slave bridge1279): Enslaving as an active interface with an up link [ 2196.394773][ T8415] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2196.409587][ T8378] netlink: 'syz-executor.3': attribute type 1 has an invalid length. 10:42:12 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xfeff0000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:42:12 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000000000)=0xfffffffffffffbff) [ 2196.513784][ T8419] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2196.661883][ T8378] 8021q: adding VLAN 0 to HW filter on device bond1453 10:42:12 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, 
&(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:42:12 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) r0 = syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) (async) r1 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) (async) r2 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r3 = openat$cgroup_ro(r2, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r3, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) ioctl$FS_IOC_RESVSP(r3, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) (async) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000400)=ANY=[@ANYRESDEC=r1, @ANYRES32=r3, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100", @ANYRESHEX=r0, @ANYRESOCT=r2, @ANYBLOB="ce2acbf1ca6c99376211828c78967ac130505a43562d7d78ac6b76aae175d68369a78b1b039f41815895b87ae46b2fe7befcc3109b1b6103f56edc0809cd5b7d0bea5487a0c5598c62d8a03803cc54986ad9dd3cccbe1a50400c1fd6ec5f96cc621b6e2460733b5bbeb108552f2338b08371e837de6e56379404a4b033f03cffe15a5ef3eeec04a14fa0de"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0xa, 0x80000001, 0xfffff000}, 0x10}, 0x80) (async) write$tun(r3, 0x0, 0x0) (async) sendmsg$nl_route_sched(r3, &(0x7f0000000100)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x4000}, 0xc, &(0x7f00000000c0)={&(0x7f0000000040)=@getchain={0x4c, 0x66, 0x20, 0x70bd27, 0x25dfdbfc, {0x0, 0x0, 0x0, 0x0, {0xfff2, 0x10}, {0x5, 0x6}, {0x8, 0xffe8}}, [{0x8, 0xb, 0x80}, {0x8, 0xb, 0x6}, {0x8, 0xb, 0x6}, {0x8, 0xb, 0xf4}, {0x8, 0xb, 0xfff}]}, 0x4c}, 0x1, 0x0, 0x0, 0x8080}, 0x80) (async, rerun: 32) sendmsg$nl_route(r1, 0x0, 0x0) (rerun: 32) 10:42:12 executing program 4: openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r0 = syz_init_net_socket$ax25(0x3, 0x5, 0xcf) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000000000)=0xfffffffffffffbff) 10:42:12 executing program 2: openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000000000)=0xfffffffffffffbff) [ 2196.724213][ T8384] bond1453: (slave bridge1348): making interface the new active one [ 2196.741907][ T8384] bond1453: (slave bridge1348): Enslaving as an active interface with an up link [ 2196.758886][ T8406] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
[ 2196.875214][ T8406] 8021q: adding VLAN 0 to HW filter on device bond856 10:42:13 executing program 5: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) writev(r0, &(0x7f0000000340)=[{&(0x7f0000000280)="043caa07f3a3f76d858ace87d769ded7550278376e8c4f6550d9108cbf5189deea33e10b8a7224c6107d53d01f904f58c8e83e60c4192c5a7fbd1a5f9c9fb63ca8dddee8d195355273926702d073752d80d8545e667b5c485a", 0x59}, {&(0x7f0000000100)="e48c29435b087869336f3e74363df6ea882508c8aa487ff564e800ccd724f50e3896c5d551b1a2ec6bb29187299fe2e06c11dfcb2075f9a839b34a4a6b4c2bec", 0x40}, {&(0x7f0000000400)="a318ece45becd6e47ff8375b816640bce6a9ff4d8e83ab791404d351c3ba957f2f3a0220176c17e899248cf03c8a908c4ade8de15d81e79b803df97f6fe5e817c2efde3befe034a7bc6d52a0c1caf88dc7d2077e40879876ce4aa8d6273615939a97526b8cb409d0c58f8075db16b3aed80bd44efb660be17d9db07a801a43b59a034e9998a5d60b072ab0fe5dd97a86e25568ab6d9d74c7af31171f38eaff0cb0fc24300b48212ec6608ef820fc964a28db4cd8d0526d550efac04bc60f4e4cd3c224cbe0ba45f28e0566930afbca59424e39854793c6e301fdc9cedeb00500db4af5f6c2004568aa4682fc656b1943bb63", 0xf2}, {&(0x7f0000000500)="a1a8c7128cf53b860b5aaa1ccee4ffe846420e0192cfed6fed4eeddf537179b0a83627c3868f3b78b60a61263a90957f69337b2ab0a123218ed0318185135c095b602df299adfc458eef4947ab34782ecdbae8436486efca9d9076f2316af18200a3014efba92706cc1748f7c5925bdaa3aa4516b6f1a6f22f4f1c23d2554620b90ad856298d3087e4519d78d14dd2d1dbb2401cfd330b19ac2fc26b2b4837023a585b846cdc0e007773719a6ad0cf3acb3cde81220e1909741522d75d5c49222f1d0ef6d8bd1b58070df04e658553cb734adbdb98c8482330ed25a1d53b5b3a6c09aa436715f3535c84a943", 0xec}], 0x4) accept(0xffffffffffffffff, 0x0, &(0x7f0000000080)) socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x0, 0x803, 0x0) syz_genetlink_get_family_id$mptcp(&(0x7f0000000000), r1) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r3, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r5, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0xfeffffff}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x3c}}, 0x0) 10:42:13 executing program 4: openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) r0 = syz_init_net_socket$ax25(0x3, 0x5, 0xcf) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000000000)=0xfffffffffffffbff) [ 2196.931200][ T8396] bond856: (slave bridge1011): making interface the new active one [ 2196.949391][ T8396] bond856: (slave bridge1011): Enslaving as an active interface with an up link [ 2196.961146][ T8436] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further 10:42:13 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) 
ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000000000)=0xfffffffffffffbff) [ 2197.040637][ T8423] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 10:42:13 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) r0 = syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r1 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) r2 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r3 = openat$cgroup_ro(r2, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r3, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) ioctl$FS_IOC_RESVSP(r3, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000400)=ANY=[@ANYRESDEC=r1, @ANYRES32=r3, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100", @ANYRESHEX=r0, @ANYRESOCT=r2, @ANYBLOB="ce2acbf1ca6c99376211828c78967ac130505a43562d7d78ac6b76aae175d68369a78b1b039f41815895b87ae46b2fe7befcc3109b1b6103f56edc0809cd5b7d0bea5487a0c5598c62d8a03803cc54986ad9dd3cccbe1a50400c1fd6ec5f96cc621b6e2460733b5bbeb108552f2338b08371e837de6e56379404a4b033f03cffe15a5ef3eeec04a14fa0de"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0xa, 0x80000001, 0xfffff000}, 0x10}, 0x80) write$tun(r3, 0x0, 0x0) sendmsg$nl_route_sched(r3, &(0x7f0000000100)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x4000}, 0xc, &(0x7f00000000c0)={&(0x7f0000000040)=@getchain={0x4c, 0x66, 0x20, 0x70bd27, 0x25dfdbfc, {0x0, 0x0, 0x0, 0x0, {0xfff2, 0x10}, {0x5, 0x6}, {0x8, 0xffe8}}, [{0x8, 0xb, 0x80}, {0x8, 0xb, 0x6}, {0x8, 0xb, 0x6}, {0x8, 0xb, 0xf4}, {0x8, 0xb, 0xfff}]}, 0x4c}, 0x1, 0x0, 0x0, 0x8080}, 0x80) sendmsg$nl_route(r1, 0x0, 0x0) mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) (async) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) (async) accept(0xffffffffffffffff, 0x0, 0x0) (async) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) (async) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) (async) socket$netlink(0x10, 0x3, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) (async) openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) (async) openat$cgroup_ro(r2, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) (async) ioctl$FS_IOC_RESVSP(r3, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) (async) ioctl$FS_IOC_RESVSP(r3, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) (async) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000400)=ANY=[@ANYRESDEC=r1, @ANYRES32=r3, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100", @ANYRESHEX=r0, @ANYRESOCT=r2, 
@ANYBLOB="ce2acbf1ca6c99376211828c78967ac130505a43562d7d78ac6b76aae175d68369a78b1b039f41815895b87ae46b2fe7befcc3109b1b6103f56edc0809cd5b7d0bea5487a0c5598c62d8a03803cc54986ad9dd3cccbe1a50400c1fd6ec5f96cc621b6e2460733b5bbeb108552f2338b08371e837de6e56379404a4b033f03cffe15a5ef3eeec04a14fa0de"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0xa, 0x80000001, 0xfffff000}, 0x10}, 0x80) (async) write$tun(r3, 0x0, 0x0) (async) sendmsg$nl_route_sched(r3, &(0x7f0000000100)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x4000}, 0xc, &(0x7f00000000c0)={&(0x7f0000000040)=@getchain={0x4c, 0x66, 0x20, 0x70bd27, 0x25dfdbfc, {0x0, 0x0, 0x0, 0x0, {0xfff2, 0x10}, {0x5, 0x6}, {0x8, 0xffe8}}, [{0x8, 0xb, 0x80}, {0x8, 0xb, 0x6}, {0x8, 0xb, 0x6}, {0x8, 0xb, 0xf4}, {0x8, 0xb, 0xfff}]}, 0x4c}, 0x1, 0x0, 0x0, 0x8080}, 0x80) (async) sendmsg$nl_route(r1, 0x0, 0x0) (async) 10:42:13 executing program 4: openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r0 = syz_init_net_socket$ax25(0x3, 0x5, 0xcf) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000000000)=0xfffffffffffffbff) [ 2197.191862][ T8450] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2197.223735][ T8423] 8021q: adding VLAN 0 to HW filter on device bond1417 10:42:13 executing program 4: sendmsg$802154_dgram(0xffffffffffffffff, &(0x7f0000000240)={&(0x7f0000000140)={0x24, @none={0x0, 0xffff}}, 0x14, &(0x7f0000000200)={&(0x7f0000000180)="489de230df76a8391b17f4d018770394630097fbaeece695897b44d6157432c27a88c86cdaf1dc1a17e4f513b470646d7b988cfa00bdbc9f10ee828e743ae8957fd6850ff2f4a6733c98239a0e48366920e4d3368d20cf4de0728cabfbb1843a6c1066ca064a0b16333b9d1f246aab42", 0x70}, 0x1, 0x0, 0x0, 0x10}, 0x15) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) ioctl$FS_IOC_READ_VERITY_METADATA(r0, 0xc0286687, &(0x7f0000000000)={0x3, 0x6, 0xbe, &(0x7f0000000080)=""/190}) [ 2197.451486][ T8470] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2197.518589][ T8424] bond1417: (slave bridge1280): making interface the new active one 10:42:13 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xfeffffff}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:42:13 executing program 2: sendmsg$NL80211_CMD_GET_PROTOCOL_FEATURES(0xffffffffffffffff, &(0x7f0000000100)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x1}, 0xc, &(0x7f00000000c0)={&(0x7f0000000080)={0x14, 0x0, 0x100, 0x70bd26, 0x25dfdbfc, {}, ["", "", "", "", "", ""]}, 0x14}, 0x1, 0x0, 0x0, 0x20048010}, 0x4008000) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 
0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2197.571579][ T8424] bond1417: (slave bridge1280): Enslaving as an active interface with an up link [ 2197.594105][ T8432] netlink: 'syz-executor.3': attribute type 1 has an invalid length. [ 2197.650175][ T8475] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2197.737999][ T8432] 8021q: adding VLAN 0 to HW filter on device bond1454 [ 2197.807718][ T8435] bond1454: (slave bridge1349): making interface the new active one [ 2197.830617][ T8435] bond1454: (slave bridge1349): Enslaving as an active interface with an up link 10:42:13 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0x3}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:42:13 executing program 4: sendmsg$802154_dgram(0xffffffffffffffff, &(0x7f0000000240)={&(0x7f0000000140)={0x24, @none={0x0, 0xffff}}, 0x14, &(0x7f0000000200)={&(0x7f0000000180)="489de230df76a8391b17f4d018770394630097fbaeece695897b44d6157432c27a88c86cdaf1dc1a17e4f513b470646d7b988cfa00bdbc9f10ee828e743ae8957fd6850ff2f4a6733c98239a0e48366920e4d3368d20cf4de0728cabfbb1843a6c1066ca064a0b16333b9d1f246aab42", 0x70}, 0x1, 0x0, 0x0, 0x10}, 0x15) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) ioctl$FS_IOC_READ_VERITY_METADATA(r0, 0xc0286687, &(0x7f0000000000)={0x3, 0x6, 0xbe, &(0x7f0000000080)=""/190}) sendmsg$802154_dgram(0xffffffffffffffff, &(0x7f0000000240)={&(0x7f0000000140)={0x24, @none={0x0, 0xffff}}, 0x14, &(0x7f0000000200)={&(0x7f0000000180)="489de230df76a8391b17f4d018770394630097fbaeece695897b44d6157432c27a88c86cdaf1dc1a17e4f513b470646d7b988cfa00bdbc9f10ee828e743ae8957fd6850ff2f4a6733c98239a0e48366920e4d3368d20cf4de0728cabfbb1843a6c1066ca064a0b16333b9d1f246aab42", 0x70}, 0x1, 0x0, 0x0, 0x10}, 0x15) (async) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) ioctl$FS_IOC_READ_VERITY_METADATA(r0, 0xc0286687, &(0x7f0000000000)={0x3, 0x6, 0xbe, &(0x7f0000000080)=""/190}) (async) 10:42:13 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r1 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) r2 = socket$nl_generic(0x10, 0x3, 0x10) r3 = 
syz_genetlink_get_family_id$ethtool(&(0x7f0000000180), 0xffffffffffffffff) sendmsg$ETHTOOL_MSG_FEATURES_GET(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f00000002c0)={0x2c, r3, 0x1, 0x0, 0x0, {0x5}, [@HEADER={0x18, 0x1, 0x0, 0x1, [@ETHTOOL_A_HEADER_DEV_NAME={0x14, 0x2, 'wg0\x00'}]}]}, 0x2c}}, 0x0) r4 = socket$nl_route(0x10, 0x3, 0x0) r5 = socket(0x1, 0x803, 0x0) getsockname$packet(r5, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) r7 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r8 = openat$cgroup_ro(r7, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r8, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) ioctl$FS_IOC_RESVSP(r8, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r8, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) write$tun(r8, 0x0, 0x0) sendmsg$nl_generic(r8, &(0x7f0000000300)={&(0x7f0000000200)={0x10, 0x0, 0x0, 0x4000}, 0xc, &(0x7f0000000240)={&(0x7f0000000400)={0xec, 0x11, 0x10, 0x70bd2d, 0x25dfdbfd, {0x3}, [@generic="902f85ddb853d57a07f94c550d919019166f24bb4ff666276dec1c9a29b2ddce193a8ea8c97420c933b713b639837fd57a691907ba30c2afe9215b25431ada611c9ce54538c4bca9b3cda9fb2062c1afa6b20070351065c2f8aeb349d046f08bcd91eb2bab411ebbede25e47e0c12b6759a7f7b4b0b6eb40f79005a7f8aca4c5bb3647878726d4ccc64abd5b3d2364a9836f6dd94713d09bdf79aab1d068a37380183ede326744872bc69fe1ac2b090bf04a39e16be8113edff92f9a9f5047", @typed={0xc, 0x76, 0x0, 0x0, @u64=0x6}, @nested={0xc, 0x4e, 0x0, 0x1, [@typed={0x8, 0x9, 0x0, 0x0, @pid=0xffffffffffffffff}]}]}, 0xec}, 0x1, 0x0, 0x0, 0x80}, 0x10) sendmsg$nl_route(r4, &(0x7f0000000280)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000180)=@newlink={0x3c, 0x10, 0x403, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @vlan={{0x9}, {0x4}}}, @IFLA_MASTER={0x8, 0x4, r6}]}, 0x3c}}, 0x0) r9 = socket$nl_route(0x10, 0x3, 0x0) r10 = socket(0x1, 0x803, 0x0) getsockname$packet(r10, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r9, &(0x7f0000000280)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000180)=@newlink={0x3c, 0x10, 0x403, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @vlan={{0x9}, {0x4}}}, @IFLA_MASTER={0x8, 0x4, r11}]}, 0x3c}}, 0x0) sendmsg$ETHTOOL_MSG_WOL_SET(r0, &(0x7f0000000100)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x4}, 0xc, &(0x7f00000000c0)={&(0x7f0000000040)={0x58, r3, 0x0, 0x70bd25, 0x25dfdbfc, {}, [@ETHTOOL_A_WOL_HEADER={0x44, 0x1, 0x0, 0x1, [@ETHTOOL_A_HEADER_DEV_NAME={0x14, 0x2, 'rose0\x00'}, @ETHTOOL_A_HEADER_DEV_NAME={0x14, 0x2, 'vcan0\x00'}, @ETHTOOL_A_HEADER_DEV_INDEX={0x8, 0x1, r6}, @ETHTOOL_A_HEADER_DEV_INDEX={0x8, 0x1, r11}, @ETHTOOL_A_HEADER_DEV_INDEX={0x8}]}]}, 0x58}, 0x1, 0x0, 0x0, 0x8004}, 0xc000) sendmsg$nl_route(r1, 0x0, 0x0) 10:42:13 executing program 2: sendmsg$NL80211_CMD_GET_PROTOCOL_FEATURES(0xffffffffffffffff, &(0x7f0000000100)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x1}, 0xc, 
&(0x7f00000000c0)={&(0x7f0000000080)={0x14, 0x0, 0x100, 0x70bd26, 0x25dfdbfc, {}, ["", "", "", "", "", ""]}, 0x14}, 0x1, 0x0, 0x0, 0x20048010}, 0x4008000) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) sendmsg$NL80211_CMD_GET_PROTOCOL_FEATURES(0xffffffffffffffff, &(0x7f0000000100)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x1}, 0xc, &(0x7f00000000c0)={&(0x7f0000000080)={0x14, 0x0, 0x100, 0x70bd26, 0x25dfdbfc, {}, ["", "", "", "", "", ""]}, 0x14}, 0x1, 0x0, 0x0, 0x20048010}, 0x4008000) (async) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) [ 2197.852348][ T8456] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2197.917875][ T8483] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2198.024397][ T8456] 8021q: adding VLAN 0 to HW filter on device bond857 [ 2198.189212][ T8460] bond857: (slave bridge1012): making interface the new active one [ 2198.203600][ T8460] bond857: (slave bridge1012): Enslaving as an active interface with an up link 10:42:14 executing program 5: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) writev(r0, &(0x7f0000000340)=[{&(0x7f0000000280)="043caa07f3a3f76d858ace87d769ded7550278376e8c4f6550d9108cbf5189deea33e10b8a7224c6107d53d01f904f58c8e83e60c4192c5a7fbd1a5f9c9fb63ca8dddee8d195355273926702d073752d80d8545e667b5c485a", 0x59}, {&(0x7f0000000100)="e48c29435b087869336f3e74363df6ea882508c8aa487ff564e800ccd724f50e3896c5d551b1a2ec6bb29187299fe2e06c11dfcb2075f9a839b34a4a6b4c2bec", 0x40}, {&(0x7f0000000400)="a318ece45becd6e47ff8375b816640bce6a9ff4d8e83ab791404d351c3ba957f2f3a0220176c17e899248cf03c8a908c4ade8de15d81e79b803df97f6fe5e817c2efde3befe034a7bc6d52a0c1caf88dc7d2077e40879876ce4aa8d6273615939a97526b8cb409d0c58f8075db16b3aed80bd44efb660be17d9db07a801a43b59a034e9998a5d60b072ab0fe5dd97a86e25568ab6d9d74c7af31171f38eaff0cb0fc24300b48212ec6608ef820fc964a28db4cd8d0526d550efac04bc60f4e4cd3c224cbe0ba45f28e0566930afbca59424e39854793c6e301fdc9cedeb00500db4af5f6c2004568aa4682fc656b1943bb63", 0xf2}, {&(0x7f0000000500)="a1a8c7128cf53b860b5aaa1ccee4ffe846420e0192cfed6fed4eeddf537179b0a83627c3868f3b78b60a61263a90957f69337b2ab0a123218ed0318185135c095b602df299adfc458eef4947ab34782ecdbae8436486efca9d9076f2316af18200a3014efba92706cc1748f7c5925bdaa3aa4516b6f1a6f22f4f1c23d2554620b90ad856298d3087e4519d78d14dd2d1dbb2401cfd330b19ac2fc26b2b4837023a585b846cdc0e007773719a6ad0cf3acb3cde81220e1909741522d75d5c49222f1d0ef6d8bd1b58070df04e658553cb734adbdb98c8482330ed25a1d53b5b3a6c09aa436715f3535c84a943", 0xec}], 0x4) accept(0xffffffffffffffff, 0x0, &(0x7f0000000080)) socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x0, 0x803, 0x0) syz_genetlink_get_family_id$mptcp(&(0x7f0000000000), r1) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r3, 
&(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r5, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0xff0f0000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x3c}}, 0x0) 10:42:14 executing program 2: sendmsg$NL80211_CMD_GET_PROTOCOL_FEATURES(0xffffffffffffffff, &(0x7f0000000100)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x1}, 0xc, &(0x7f00000000c0)={&(0x7f0000000080)={0x14, 0x0, 0x100, 0x70bd26, 0x25dfdbfc, {}, ["", "", "", "", "", ""]}, 0x14}, 0x1, 0x0, 0x0, 0x20048010}, 0x4008000) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) sendmsg$NL80211_CMD_GET_PROTOCOL_FEATURES(0xffffffffffffffff, &(0x7f0000000100)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x1}, 0xc, &(0x7f00000000c0)={&(0x7f0000000080)={0x14, 0x0, 0x100, 0x70bd26, 0x25dfdbfc, {}, ["", "", "", "", "", ""]}, 0x14}, 0x1, 0x0, 0x0, 0x20048010}, 0x4008000) (async) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) 10:42:14 executing program 4: sendmsg$802154_dgram(0xffffffffffffffff, &(0x7f0000000240)={&(0x7f0000000140)={0x24, @none={0x0, 0xffff}}, 0x14, &(0x7f0000000200)={&(0x7f0000000180)="489de230df76a8391b17f4d018770394630097fbaeece695897b44d6157432c27a88c86cdaf1dc1a17e4f513b470646d7b988cfa00bdbc9f10ee828e743ae8957fd6850ff2f4a6733c98239a0e48366920e4d3368d20cf4de0728cabfbb1843a6c1066ca064a0b16333b9d1f246aab42", 0x70}, 0x1, 0x0, 0x0, 0x10}, 0x15) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) ioctl$FS_IOC_READ_VERITY_METADATA(r0, 0xc0286687, &(0x7f0000000000)={0x3, 0x6, 0xbe, &(0x7f0000000080)=""/190}) [ 2198.246868][ T8472] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 2198.335614][ T8472] 8021q: adding VLAN 0 to HW filter on device bond1418 10:42:14 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r1 = socket$isdn(0x22, 0x3, 0x10) write$binfmt_script(r1, &(0x7f0000000080)={'#! 
', './file0', [{0x20, 'memory.events\x00'}, {0x20, 'memory.events\x00'}, {0x20, 'memory.events\x00'}], 0xa, "3c39139f0c08ce8c66a60d8a31d213aee18c2962d65c92c7ffdf5809c80f6deddf63867eaf52d85f1820e4a8dcc65f4084c05969ea755a9a6d4783c01d2a0859fc6b5cd5bee8709bcc1cd1f64d85a2b09dd6d3c525f7662e3731a9ddbbd86868bbc55f7271df06ec6c49704359d6a6031704ba31cbb9e79e1e03d15bdc8fbcd61932deae75119bfccb7a21d7570ce5ab4f4a3f"}, 0xcb) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2198.378693][ T8476] bridge1281: entered promiscuous mode [ 2198.462008][ T8476] bridge1281: entered allmulticast mode 10:42:14 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r0, 0x0, 0x8000000000004) r1 = openat$cgroup_ro(r0, &(0x7f0000000000)='blkio.bfq.time_recursive\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r1, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 10:42:14 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r1 = socket$isdn(0x22, 0x3, 0x10) write$binfmt_script(r1, &(0x7f0000000080)={'#! ', './file0', [{0x20, 'memory.events\x00'}, {0x20, 'memory.events\x00'}, {0x20, 'memory.events\x00'}], 0xa, "3c39139f0c08ce8c66a60d8a31d213aee18c2962d65c92c7ffdf5809c80f6deddf63867eaf52d85f1820e4a8dcc65f4084c05969ea755a9a6d4783c01d2a0859fc6b5cd5bee8709bcc1cd1f64d85a2b09dd6d3c525f7662e3731a9ddbbd86868bbc55f7271df06ec6c49704359d6a6031704ba31cbb9e79e1e03d15bdc8fbcd61932deae75119bfccb7a21d7570ce5ab4f4a3f"}, 0xcb) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) socket$isdn(0x22, 0x3, 0x10) (async) write$binfmt_script(r1, &(0x7f0000000080)={'#! 
', './file0', [{0x20, 'memory.events\x00'}, {0x20, 'memory.events\x00'}, {0x20, 'memory.events\x00'}], 0xa, "3c39139f0c08ce8c66a60d8a31d213aee18c2962d65c92c7ffdf5809c80f6deddf63867eaf52d85f1820e4a8dcc65f4084c05969ea755a9a6d4783c01d2a0859fc6b5cd5bee8709bcc1cd1f64d85a2b09dd6d3c525f7662e3731a9ddbbd86868bbc55f7271df06ec6c49704359d6a6031704ba31cbb9e79e1e03d15bdc8fbcd61932deae75119bfccb7a21d7570ce5ab4f4a3f"}, 0xcb) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) 10:42:14 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xff0f0000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:42:14 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r0, 0x0, 0x8000000000004) (async) r1 = openat$cgroup_ro(r0, &(0x7f0000000000)='blkio.bfq.time_recursive\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r1, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2198.975797][ T8494] 8021q: adding VLAN 0 to HW filter on device bond1455 10:42:15 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0x4}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:42:15 executing program 4: openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r1 = socket$isdn(0x22, 0x3, 0x10) write$binfmt_script(r1, &(0x7f0000000080)={'#! 
', './file0', [{0x20, 'memory.events\x00'}, {0x20, 'memory.events\x00'}, {0x20, 'memory.events\x00'}], 0xa, "3c39139f0c08ce8c66a60d8a31d213aee18c2962d65c92c7ffdf5809c80f6deddf63867eaf52d85f1820e4a8dcc65f4084c05969ea755a9a6d4783c01d2a0859fc6b5cd5bee8709bcc1cd1f64d85a2b09dd6d3c525f7662e3731a9ddbbd86868bbc55f7271df06ec6c49704359d6a6031704ba31cbb9e79e1e03d15bdc8fbcd61932deae75119bfccb7a21d7570ce5ab4f4a3f"}, 0xcb) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 10:42:15 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) (async) r0 = openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) (async) accept(0xffffffffffffffff, 0x0, 0x0) (async) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) (async) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) (async) r1 = socket$netlink(0x10, 0x3, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) r2 = socket$nl_generic(0x10, 0x3, 0x10) (async) r3 = syz_genetlink_get_family_id$ethtool(&(0x7f0000000180), 0xffffffffffffffff) sendmsg$ETHTOOL_MSG_FEATURES_GET(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f00000002c0)={0x2c, r3, 0x1, 0x0, 0x0, {0x5}, [@HEADER={0x18, 0x1, 0x0, 0x1, [@ETHTOOL_A_HEADER_DEV_NAME={0x14, 0x2, 'wg0\x00'}]}]}, 0x2c}}, 0x0) (async) r4 = socket$nl_route(0x10, 0x3, 0x0) (async) r5 = socket(0x1, 0x803, 0x0) getsockname$packet(r5, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) (async) r7 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r8 = openat$cgroup_ro(r7, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r8, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) (async) ioctl$FS_IOC_RESVSP(r8, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) (async) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r8, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) write$tun(r8, 0x0, 0x0) (async, rerun: 32) sendmsg$nl_generic(r8, &(0x7f0000000300)={&(0x7f0000000200)={0x10, 0x0, 0x0, 0x4000}, 0xc, &(0x7f0000000240)={&(0x7f0000000400)={0xec, 0x11, 0x10, 0x70bd2d, 0x25dfdbfd, {0x3}, [@generic="902f85ddb853d57a07f94c550d919019166f24bb4ff666276dec1c9a29b2ddce193a8ea8c97420c933b713b639837fd57a691907ba30c2afe9215b25431ada611c9ce54538c4bca9b3cda9fb2062c1afa6b20070351065c2f8aeb349d046f08bcd91eb2bab411ebbede25e47e0c12b6759a7f7b4b0b6eb40f79005a7f8aca4c5bb3647878726d4ccc64abd5b3d2364a9836f6dd94713d09bdf79aab1d068a37380183ede326744872bc69fe1ac2b090bf04a39e16be8113edff92f9a9f5047", @typed={0xc, 0x76, 0x0, 0x0, @u64=0x6}, @nested={0xc, 0x4e, 0x0, 0x1, [@typed={0x8, 0x9, 0x0, 0x0, @pid=0xffffffffffffffff}]}]}, 0xec}, 0x1, 0x0, 0x0, 0x80}, 0x10) (async, rerun: 32) sendmsg$nl_route(r4, &(0x7f0000000280)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000180)=@newlink={0x3c, 0x10, 0x403, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @vlan={{0x9}, {0x4}}}, 
@IFLA_MASTER={0x8, 0x4, r6}]}, 0x3c}}, 0x0) r9 = socket$nl_route(0x10, 0x3, 0x0) (async) r10 = socket(0x1, 0x803, 0x0) getsockname$packet(r10, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r9, &(0x7f0000000280)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000180)=@newlink={0x3c, 0x10, 0x403, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @vlan={{0x9}, {0x4}}}, @IFLA_MASTER={0x8, 0x4, r11}]}, 0x3c}}, 0x0) sendmsg$ETHTOOL_MSG_WOL_SET(r0, &(0x7f0000000100)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x4}, 0xc, &(0x7f00000000c0)={&(0x7f0000000040)={0x58, r3, 0x0, 0x70bd25, 0x25dfdbfc, {}, [@ETHTOOL_A_WOL_HEADER={0x44, 0x1, 0x0, 0x1, [@ETHTOOL_A_HEADER_DEV_NAME={0x14, 0x2, 'rose0\x00'}, @ETHTOOL_A_HEADER_DEV_NAME={0x14, 0x2, 'vcan0\x00'}, @ETHTOOL_A_HEADER_DEV_INDEX={0x8, 0x1, r6}, @ETHTOOL_A_HEADER_DEV_INDEX={0x8, 0x1, r11}, @ETHTOOL_A_HEADER_DEV_INDEX={0x8}]}]}, 0x58}, 0x1, 0x0, 0x0, 0x8004}, 0xc000) (async, rerun: 64) sendmsg$nl_route(r1, 0x0, 0x0) (rerun: 64) 10:42:15 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r0, 0x0, 0x8000000000004) (async) r1 = openat$cgroup_ro(r0, &(0x7f0000000000)='blkio.bfq.time_recursive\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r1, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2199.058026][ T8496] bond1455: (slave bridge1350): making interface the new active one [ 2199.086142][ T8496] bond1455: (slave bridge1350): Enslaving as an active interface with an up link 10:42:15 executing program 5: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) writev(r0, &(0x7f0000000340)=[{&(0x7f0000000280)="043caa07f3a3f76d858ace87d769ded7550278376e8c4f6550d9108cbf5189deea33e10b8a7224c6107d53d01f904f58c8e83e60c4192c5a7fbd1a5f9c9fb63ca8dddee8d195355273926702d073752d80d8545e667b5c485a", 0x59}, {&(0x7f0000000100)="e48c29435b087869336f3e74363df6ea882508c8aa487ff564e800ccd724f50e3896c5d551b1a2ec6bb29187299fe2e06c11dfcb2075f9a839b34a4a6b4c2bec", 0x40}, {&(0x7f0000000400)="a318ece45becd6e47ff8375b816640bce6a9ff4d8e83ab791404d351c3ba957f2f3a0220176c17e899248cf03c8a908c4ade8de15d81e79b803df97f6fe5e817c2efde3befe034a7bc6d52a0c1caf88dc7d2077e40879876ce4aa8d6273615939a97526b8cb409d0c58f8075db16b3aed80bd44efb660be17d9db07a801a43b59a034e9998a5d60b072ab0fe5dd97a86e25568ab6d9d74c7af31171f38eaff0cb0fc24300b48212ec6608ef820fc964a28db4cd8d0526d550efac04bc60f4e4cd3c224cbe0ba45f28e0566930afbca59424e39854793c6e301fdc9cedeb00500db4af5f6c2004568aa4682fc656b1943bb63", 0xf2}, {&(0x7f0000000500)="a1a8c7128cf53b860b5aaa1ccee4ffe846420e0192cfed6fed4eeddf537179b0a83627c3868f3b78b60a61263a90957f69337b2ab0a123218ed0318185135c095b602df299adfc458eef4947ab34782ecdbae8436486efca9d9076f2316af18200a3014efba92706cc1748f7c5925bdaa3aa4516b6f1a6f22f4f1c23d2554620b90ad856298d3087e4519d78d14dd2d1dbb2401cfd330b19ac2fc26b2b4837023a585b846cdc0e007773719a6ad0cf3acb3cde81220e1909741522d75d5c49222f1d0ef6d8bd1b58070df04e658553cb734adbdb98c8482330ed25a1d53b5b3a6c09aa436715f3535c84a943", 0xec}], 0x4) accept(0xffffffffffffffff, 0x0, &(0x7f0000000080)) socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x0, 0x803, 0x0) syz_genetlink_get_family_id$mptcp(&(0x7f0000000000), r1) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) r2 = socket$netlink(0x10, 0x3, 
0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r3, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r5, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0xff7f0000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x3c}}, 0x0) 10:42:15 executing program 4: socket$inet6_udplite(0xa, 0x2, 0x88) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) connect$inet(r0, &(0x7f0000000000)={0x2, 0x4e24, @multicast1}, 0x10) ioctl$BTRFS_IOC_BALANCE_CTL(r0, 0x40049421, 0x0) 10:42:15 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) (async) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) (async) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r1 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) (async) r2 = socket$nl_generic(0x10, 0x3, 0x10) (async) r3 = syz_genetlink_get_family_id$ethtool(&(0x7f0000000180), 0xffffffffffffffff) sendmsg$ETHTOOL_MSG_FEATURES_GET(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={&(0x7f00000002c0)={0x2c, r3, 0x1, 0x0, 0x0, {0x5}, [@HEADER={0x18, 0x1, 0x0, 0x1, [@ETHTOOL_A_HEADER_DEV_NAME={0x14, 0x2, 'wg0\x00'}]}]}, 0x2c}}, 0x0) r4 = socket$nl_route(0x10, 0x3, 0x0) (async) r5 = socket(0x1, 0x803, 0x0) getsockname$packet(r5, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) (async) r7 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r8 = openat$cgroup_ro(r7, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r8, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) (async) ioctl$FS_IOC_RESVSP(r8, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) (async) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r8, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) write$tun(r8, 0x0, 0x0) (async) sendmsg$nl_generic(r8, &(0x7f0000000300)={&(0x7f0000000200)={0x10, 0x0, 0x0, 0x4000}, 0xc, &(0x7f0000000240)={&(0x7f0000000400)={0xec, 0x11, 0x10, 0x70bd2d, 0x25dfdbfd, {0x3}, 
[@generic="902f85ddb853d57a07f94c550d919019166f24bb4ff666276dec1c9a29b2ddce193a8ea8c97420c933b713b639837fd57a691907ba30c2afe9215b25431ada611c9ce54538c4bca9b3cda9fb2062c1afa6b20070351065c2f8aeb349d046f08bcd91eb2bab411ebbede25e47e0c12b6759a7f7b4b0b6eb40f79005a7f8aca4c5bb3647878726d4ccc64abd5b3d2364a9836f6dd94713d09bdf79aab1d068a37380183ede326744872bc69fe1ac2b090bf04a39e16be8113edff92f9a9f5047", @typed={0xc, 0x76, 0x0, 0x0, @u64=0x6}, @nested={0xc, 0x4e, 0x0, 0x1, [@typed={0x8, 0x9, 0x0, 0x0, @pid=0xffffffffffffffff}]}]}, 0xec}, 0x1, 0x0, 0x0, 0x80}, 0x10) (async) sendmsg$nl_route(r4, &(0x7f0000000280)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000180)=@newlink={0x3c, 0x10, 0x403, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @vlan={{0x9}, {0x4}}}, @IFLA_MASTER={0x8, 0x4, r6}]}, 0x3c}}, 0x0) (async) r9 = socket$nl_route(0x10, 0x3, 0x0) (async) r10 = socket(0x1, 0x803, 0x0) getsockname$packet(r10, &(0x7f0000000100)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r9, &(0x7f0000000280)={0x0, 0x0, &(0x7f0000000140)={&(0x7f0000000180)=@newlink={0x3c, 0x10, 0x403, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @vlan={{0x9}, {0x4}}}, @IFLA_MASTER={0x8, 0x4, r11}]}, 0x3c}}, 0x0) sendmsg$ETHTOOL_MSG_WOL_SET(r0, &(0x7f0000000100)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x4}, 0xc, &(0x7f00000000c0)={&(0x7f0000000040)={0x58, r3, 0x0, 0x70bd25, 0x25dfdbfc, {}, [@ETHTOOL_A_WOL_HEADER={0x44, 0x1, 0x0, 0x1, [@ETHTOOL_A_HEADER_DEV_NAME={0x14, 0x2, 'rose0\x00'}, @ETHTOOL_A_HEADER_DEV_NAME={0x14, 0x2, 'vcan0\x00'}, @ETHTOOL_A_HEADER_DEV_INDEX={0x8, 0x1, r6}, @ETHTOOL_A_HEADER_DEV_INDEX={0x8, 0x1, r11}, @ETHTOOL_A_HEADER_DEV_INDEX={0x8}]}]}, 0x58}, 0x1, 0x0, 0x0, 0x8004}, 0xc000) (async) sendmsg$nl_route(r1, 0x0, 0x0) [ 2199.211627][ T8512] workqueue: Failed to create a rescuer kthread for wq "bond858": -EINTR [ 2199.374891][ T8525] 8021q: adding VLAN 0 to HW filter on device bond1419 10:42:15 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r1 = bpf$ITER_CREATE(0x21, &(0x7f00000000c0)={r0}, 0x8) r2 = openat$cgroup_ro(r1, &(0x7f0000000100)='hugetlb.2MB.usage_in_bytes\x00', 0x0, 0x0) r3 = bpf$BPF_MAP_GET_FD_BY_ID(0xe, &(0x7f0000000140)={0x0, 0x1, 0x8}, 0xc) r4 = bpf$OBJ_GET_PROG(0x7, &(0x7f00000001c0)={&(0x7f0000000180)='./file0\x00', 0x0, 0x1c}, 0x10) bpf$BPF_PROG_ATTACH(0x8, &(0x7f0000000200)={@map=r3, r2, 0x15, 0x1, r4}, 0x14) ioctl$sock_SIOCGIFINDEX(0xffffffffffffffff, 0x8933, &(0x7f0000000080)={'hsr0\x00'}) openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000)='./cgroup/syz1\x00', 0x200002, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2199.469299][ T8526] bond1419: (slave bridge1281): making interface the new active one [ 2199.484197][ T8526] bond1419: (slave bridge1281): Enslaving as an active interface with an up link 10:42:15 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, 
&(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xff7f0000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2199.632920][ T8538] 8021q: adding VLAN 0 to HW filter on device bond1456 10:42:15 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r1 = bpf$ITER_CREATE(0x21, &(0x7f00000000c0)={r0}, 0x8) r2 = openat$cgroup_ro(r1, &(0x7f0000000100)='hugetlb.2MB.usage_in_bytes\x00', 0x0, 0x0) r3 = bpf$BPF_MAP_GET_FD_BY_ID(0xe, &(0x7f0000000140)={0x0, 0x1, 0x8}, 0xc) (async, rerun: 64) r4 = bpf$OBJ_GET_PROG(0x7, &(0x7f00000001c0)={&(0x7f0000000180)='./file0\x00', 0x0, 0x1c}, 0x10) (rerun: 64) bpf$BPF_PROG_ATTACH(0x8, &(0x7f0000000200)={@map=r3, r2, 0x15, 0x1, r4}, 0x14) (async, rerun: 64) ioctl$sock_SIOCGIFINDEX(0xffffffffffffffff, 0x8933, &(0x7f0000000080)={'hsr0\x00'}) (async, rerun: 64) openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000)='./cgroup/syz1\x00', 0x200002, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 10:42:15 executing program 4: socket$inet6_udplite(0xa, 0x2, 0x88) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) connect$inet(r0, &(0x7f0000000000)={0x2, 0x4e24, @multicast1}, 0x10) ioctl$BTRFS_IOC_BALANCE_CTL(r0, 0x40049421, 0x0) socket$inet6_udplite(0xa, 0x2, 0x88) (async) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) connect$inet(r0, &(0x7f0000000000)={0x2, 0x4e24, @multicast1}, 0x10) (async) ioctl$BTRFS_IOC_BALANCE_CTL(r0, 0x40049421, 0x0) (async) [ 2199.794698][ T8543] bond1456: (slave bridge1351): making interface the new active one [ 2199.850688][ T8543] bond1456: (slave bridge1351): Enslaving as an active interface with an up link [ 2199.877967][ T8552] validate_nla: 4 callbacks suppressed [ 2199.877987][ T8552] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
10:42:15 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r1 = bpf$ITER_CREATE(0x21, &(0x7f00000000c0)={r0}, 0x8) r2 = openat$cgroup_ro(r1, &(0x7f0000000100)='hugetlb.2MB.usage_in_bytes\x00', 0x0, 0x0) (async) r3 = bpf$BPF_MAP_GET_FD_BY_ID(0xe, &(0x7f0000000140)={0x0, 0x1, 0x8}, 0xc) (async) r4 = bpf$OBJ_GET_PROG(0x7, &(0x7f00000001c0)={&(0x7f0000000180)='./file0\x00', 0x0, 0x1c}, 0x10) bpf$BPF_PROG_ATTACH(0x8, &(0x7f0000000200)={@map=r3, r2, 0x15, 0x1, r4}, 0x14) ioctl$sock_SIOCGIFINDEX(0xffffffffffffffff, 0x8933, &(0x7f0000000080)={'hsr0\x00'}) (async) openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000)='./cgroup/syz1\x00', 0x200002, 0x0) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 10:42:16 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0x5}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:42:16 executing program 4: socket$inet6_udplite(0xa, 0x2, 0x88) (async) socket$inet6_udplite(0xa, 0x2, 0x88) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) connect$inet(r0, &(0x7f0000000000)={0x2, 0x4e24, @multicast1}, 0x10) ioctl$BTRFS_IOC_BALANCE_CTL(r0, 0x40049421, 0x0) 10:42:16 executing program 2: unshare(0x8030000) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2200.150876][ T8552] 8021q: adding VLAN 0 to HW filter on device bond1420 10:42:16 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000040)='memory.events.local\x00', 0x275a, 0x0) openat$cgroup_ro(r0, &(0x7f0000000000)='blkio.bfq.empty_time\x00', 0x0, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r1, 0x0, 0x8000000000004) sendfile(r1, r0, 0x0, 0x4) [ 2200.335474][ T8588] EXT4-fs warning: 12 callbacks suppressed [ 2200.335492][ T8588] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2200.381167][ T8559] bond1420: (slave bridge1282): making interface the new active one [ 2200.441129][ T8559] bond1420: (slave bridge1282): Enslaving as an active interface with an up link [ 2200.475694][ T8564] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
10:42:16 executing program 5: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) writev(r0, &(0x7f0000000340)=[{&(0x7f0000000280)="043caa07f3a3f76d858ace87d769ded7550278376e8c4f6550d9108cbf5189deea33e10b8a7224c6107d53d01f904f58c8e83e60c4192c5a7fbd1a5f9c9fb63ca8dddee8d195355273926702d073752d80d8545e667b5c485a", 0x59}, {&(0x7f0000000100)="e48c29435b087869336f3e74363df6ea882508c8aa487ff564e800ccd724f50e3896c5d551b1a2ec6bb29187299fe2e06c11dfcb2075f9a839b34a4a6b4c2bec", 0x40}, {&(0x7f0000000400)="a318ece45becd6e47ff8375b816640bce6a9ff4d8e83ab791404d351c3ba957f2f3a0220176c17e899248cf03c8a908c4ade8de15d81e79b803df97f6fe5e817c2efde3befe034a7bc6d52a0c1caf88dc7d2077e40879876ce4aa8d6273615939a97526b8cb409d0c58f8075db16b3aed80bd44efb660be17d9db07a801a43b59a034e9998a5d60b072ab0fe5dd97a86e25568ab6d9d74c7af31171f38eaff0cb0fc24300b48212ec6608ef820fc964a28db4cd8d0526d550efac04bc60f4e4cd3c224cbe0ba45f28e0566930afbca59424e39854793c6e301fdc9cedeb00500db4af5f6c2004568aa4682fc656b1943bb63", 0xf2}, {&(0x7f0000000500)="a1a8c7128cf53b860b5aaa1ccee4ffe846420e0192cfed6fed4eeddf537179b0a83627c3868f3b78b60a61263a90957f69337b2ab0a123218ed0318185135c095b602df299adfc458eef4947ab34782ecdbae8436486efca9d9076f2316af18200a3014efba92706cc1748f7c5925bdaa3aa4516b6f1a6f22f4f1c23d2554620b90ad856298d3087e4519d78d14dd2d1dbb2401cfd330b19ac2fc26b2b4837023a585b846cdc0e007773719a6ad0cf3acb3cde81220e1909741522d75d5c49222f1d0ef6d8bd1b58070df04e658553cb734adbdb98c8482330ed25a1d53b5b3a6c09aa436715f3535c84a943", 0xec}], 0x4) accept(0xffffffffffffffff, 0x0, &(0x7f0000000080)) socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x0, 0x803, 0x0) syz_genetlink_get_family_id$mptcp(&(0x7f0000000000), r1) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r3, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r5, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0xffffa888}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x3c}}, 0x0) 10:42:16 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000040)='memory.events.local\x00', 0x275a, 0x0) openat$cgroup_ro(r0, &(0x7f0000000000)='blkio.bfq.empty_time\x00', 0x0, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r1, 0x0, 0x8000000000004) (async) sendfile(r1, r0, 0x0, 0x4) 10:42:16 executing program 2: unshare(0x8030000) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 10:42:16 executing program 1: r0 = socket$netlink(0x10, 
0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xffff0300}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2200.516020][ T8564] workqueue: Failed to create a rescuer kthread for wq "bond858": -EINTR [ 2200.720297][ T8579] netlink: 'syz-executor.3': attribute type 1 has an invalid length. [ 2200.832717][ T8597] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further 10:42:17 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000040)='memory.events.local\x00', 0x275a, 0x0) openat$cgroup_ro(r0, &(0x7f0000000000)='blkio.bfq.empty_time\x00', 0x0, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r1, 0x0, 0x8000000000004) sendfile(r1, r0, 0x0, 0x4) 10:42:17 executing program 2: unshare(0x8030000) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) unshare(0x8030000) (async) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) [ 2200.980111][ T8579] 8021q: adding VLAN 0 to HW filter on device bond1457 [ 2201.100433][ T8609] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further 10:42:17 executing program 2: r0 = socket$nl_route(0x10, 0x3, 0x0) sendmsg$nl_route(r0, &(0x7f0000000100)={0x0, 0x0, &(0x7f00000000c0)={&(0x7f0000000040)=@ipv4_newroute={0x1c, 0x18, 0x1, 0x0, 0x0, {0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf}}, 0x1c}}, 0x0) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r1, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2201.299723][ T8581] bond1457: (slave bridge1352): making interface the new active one [ 2201.349253][ T8581] bond1457: (slave bridge1352): Enslaving as an active interface with an up link [ 2201.388064][ T8599] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
10:42:17 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r1 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r2 = openat$cgroup_ro(r1, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r2, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) ioctl$FS_IOC_RESVSP(r2, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) r3 = bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r2, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) write$tun(r2, 0x0, 0x0) r4 = syz_genetlink_get_family_id$batadv(&(0x7f00000000c0), r0) ioctl$FS_IOC_GET_ENCRYPTION_POLICY_EX(r3, 0xc0096616, &(0x7f00000002c0)={0x3, [0x0, 0x0, 0x0]}) sendmsg$BATADV_CMD_GET_TRANSTABLE_LOCAL(r2, &(0x7f00000001c0)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x4000000}, 0xc, &(0x7f0000000180)={&(0x7f0000000100)={0x4c, r4, 0x400, 0x70bd26, 0x25dfdbfe, {}, [@BATADV_ATTR_NETWORK_CODING_ENABLED={0x5, 0x38, 0x1}, @BATADV_ATTR_AGGREGATED_OGMS_ENABLED={0x5, 0x29, 0x1}, @BATADV_ATTR_ISOLATION_MARK={0x8, 0x2b, 0x400}, @BATADV_ATTR_ORIG_INTERVAL={0x8}, @BATADV_ATTR_HOP_PENALTY={0x5, 0x35, 0x7}, @BATADV_ATTR_GW_MODE={0x5, 0x33, 0x2}, @BATADV_ATTR_NETWORK_CODING_ENABLED={0x5, 0x38, 0x1}]}, 0x4c}, 0x1, 0x0, 0x0, 0x4000080}, 0x40800) sendfile(0xffffffffffffffff, r0, &(0x7f0000000000)=0xfff, 0xf1) r5 = syz_init_net_socket$802154_raw(0x24, 0x3, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r5, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 10:42:17 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0x6}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2201.443916][ T8619] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further 10:42:17 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r1 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r2 = openat$cgroup_ro(r1, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r2, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) (async) ioctl$FS_IOC_RESVSP(r2, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) (async) r3 = bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, 
&(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r2, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) (async) write$tun(r2, 0x0, 0x0) (async) r4 = syz_genetlink_get_family_id$batadv(&(0x7f00000000c0), r0) ioctl$FS_IOC_GET_ENCRYPTION_POLICY_EX(r3, 0xc0096616, &(0x7f00000002c0)={0x3, [0x0, 0x0, 0x0]}) (async) sendmsg$BATADV_CMD_GET_TRANSTABLE_LOCAL(r2, &(0x7f00000001c0)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x4000000}, 0xc, &(0x7f0000000180)={&(0x7f0000000100)={0x4c, r4, 0x400, 0x70bd26, 0x25dfdbfe, {}, [@BATADV_ATTR_NETWORK_CODING_ENABLED={0x5, 0x38, 0x1}, @BATADV_ATTR_AGGREGATED_OGMS_ENABLED={0x5, 0x29, 0x1}, @BATADV_ATTR_ISOLATION_MARK={0x8, 0x2b, 0x400}, @BATADV_ATTR_ORIG_INTERVAL={0x8}, @BATADV_ATTR_HOP_PENALTY={0x5, 0x35, 0x7}, @BATADV_ATTR_GW_MODE={0x5, 0x33, 0x2}, @BATADV_ATTR_NETWORK_CODING_ENABLED={0x5, 0x38, 0x1}]}, 0x4c}, 0x1, 0x0, 0x0, 0x4000080}, 0x40800) (async) sendfile(0xffffffffffffffff, r0, &(0x7f0000000000)=0xfff, 0xf1) (async) r5 = syz_init_net_socket$802154_raw(0x24, 0x3, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r5, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2201.624413][ T8599] 8021q: adding VLAN 0 to HW filter on device bond1421 [ 2201.758643][ T8601] bridge1283: entered promiscuous mode [ 2201.777729][ T8601] bridge1283: entered allmulticast mode 10:42:17 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) r1 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r2 = openat$cgroup_ro(r1, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r2, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) (async) ioctl$FS_IOC_RESVSP(r2, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) (async, rerun: 32) r3 = bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r2, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) (async, rerun: 32) write$tun(r2, 0x0, 0x0) (async, rerun: 64) r4 = syz_genetlink_get_family_id$batadv(&(0x7f00000000c0), r0) (rerun: 64) ioctl$FS_IOC_GET_ENCRYPTION_POLICY_EX(r3, 0xc0096616, &(0x7f00000002c0)={0x3, [0x0, 0x0, 0x0]}) (async) sendmsg$BATADV_CMD_GET_TRANSTABLE_LOCAL(r2, &(0x7f00000001c0)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x4000000}, 0xc, &(0x7f0000000180)={&(0x7f0000000100)={0x4c, r4, 0x400, 0x70bd26, 0x25dfdbfe, {}, [@BATADV_ATTR_NETWORK_CODING_ENABLED={0x5, 0x38, 0x1}, @BATADV_ATTR_AGGREGATED_OGMS_ENABLED={0x5, 0x29, 0x1}, @BATADV_ATTR_ISOLATION_MARK={0x8, 0x2b, 0x400}, @BATADV_ATTR_ORIG_INTERVAL={0x8}, @BATADV_ATTR_HOP_PENALTY={0x5, 0x35, 0x7}, 
@BATADV_ATTR_GW_MODE={0x5, 0x33, 0x2}, @BATADV_ATTR_NETWORK_CODING_ENABLED={0x5, 0x38, 0x1}]}, 0x4c}, 0x1, 0x0, 0x0, 0x4000080}, 0x40800) (async) sendfile(0xffffffffffffffff, r0, &(0x7f0000000000)=0xfff, 0xf1) r5 = syz_init_net_socket$802154_raw(0x24, 0x3, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r5, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 10:42:18 executing program 4: connect$inet(0xffffffffffffffff, &(0x7f0000000000)={0x2, 0x4e23, @initdev={0xac, 0x1e, 0x0, 0x0}}, 0x10) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) [ 2201.936897][ T8601] bond1421: (slave bridge1283): making interface the new active one [ 2202.008260][ T8601] bond1421: (slave bridge1283): Enslaving as an active interface with an up link [ 2202.017781][ T8604] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2202.084023][ T8604] workqueue: Failed to create a rescuer kthread for wq "bond858": -EINTR [ 2202.267523][ T8623] netlink: 'syz-executor.3': attribute type 1 has an invalid length. 10:42:18 executing program 5: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) writev(r0, &(0x7f0000000340)=[{&(0x7f0000000280)="043caa07f3a3f76d858ace87d769ded7550278376e8c4f6550d9108cbf5189deea33e10b8a7224c6107d53d01f904f58c8e83e60c4192c5a7fbd1a5f9c9fb63ca8dddee8d195355273926702d073752d80d8545e667b5c485a", 0x59}, {&(0x7f0000000100)="e48c29435b087869336f3e74363df6ea882508c8aa487ff564e800ccd724f50e3896c5d551b1a2ec6bb29187299fe2e06c11dfcb2075f9a839b34a4a6b4c2bec", 0x40}, {&(0x7f0000000400)="a318ece45becd6e47ff8375b816640bce6a9ff4d8e83ab791404d351c3ba957f2f3a0220176c17e899248cf03c8a908c4ade8de15d81e79b803df97f6fe5e817c2efde3befe034a7bc6d52a0c1caf88dc7d2077e40879876ce4aa8d6273615939a97526b8cb409d0c58f8075db16b3aed80bd44efb660be17d9db07a801a43b59a034e9998a5d60b072ab0fe5dd97a86e25568ab6d9d74c7af31171f38eaff0cb0fc24300b48212ec6608ef820fc964a28db4cd8d0526d550efac04bc60f4e4cd3c224cbe0ba45f28e0566930afbca59424e39854793c6e301fdc9cedeb00500db4af5f6c2004568aa4682fc656b1943bb63", 0xf2}, {&(0x7f0000000500)="a1a8c7128cf53b860b5aaa1ccee4ffe846420e0192cfed6fed4eeddf537179b0a83627c3868f3b78b60a61263a90957f69337b2ab0a123218ed0318185135c095b602df299adfc458eef4947ab34782ecdbae8436486efca9d9076f2316af18200a3014efba92706cc1748f7c5925bdaa3aa4516b6f1a6f22f4f1c23d2554620b90ad856298d3087e4519d78d14dd2d1dbb2401cfd330b19ac2fc26b2b4837023a585b846cdc0e007773719a6ad0cf3acb3cde81220e1909741522d75d5c49222f1d0ef6d8bd1b58070df04e658553cb734adbdb98c8482330ed25a1d53b5b3a6c09aa436715f3535c84a943", 0xec}], 0x4) accept(0xffffffffffffffff, 0x0, &(0x7f0000000080)) socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x0, 0x803, 0x0) syz_genetlink_get_family_id$mptcp(&(0x7f0000000000), r1) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r3, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r5, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) 
sendmsg$nl_route(r2, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0xfffff000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x3c}}, 0x0) 10:42:18 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xffffa888}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:42:18 executing program 4: connect$inet(0xffffffffffffffff, &(0x7f0000000000)={0x2, 0x4e23, @initdev={0xac, 0x1e, 0x0, 0x0}}, 0x10) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) connect$inet(0xffffffffffffffff, &(0x7f0000000000)={0x2, 0x4e23, @initdev={0xac, 0x1e, 0x0, 0x0}}, 0x10) (async) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) 10:42:18 executing program 2: r0 = socket$nl_route(0x10, 0x3, 0x0) sendmsg$nl_route(r0, &(0x7f0000000100)={0x0, 0x0, &(0x7f00000000c0)={&(0x7f0000000040)=@ipv4_newroute={0x1c, 0x18, 0x1, 0x0, 0x0, {0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf}}, 0x1c}}, 0x0) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r1, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2202.442088][ T8650] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further 10:42:18 executing program 4: connect$inet(0xffffffffffffffff, &(0x7f0000000000)={0x2, 0x4e23, @initdev={0xac, 0x1e, 0x0, 0x0}}, 0x10) (async) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) [ 2202.558041][ T8623] 8021q: adding VLAN 0 to HW filter on device bond1458 10:42:18 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='hugetlb.1GB.usage_in_bytes\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2202.760750][ T8626] bond1458: (slave bridge1353): making interface the new active one [ 2202.824956][ T8661] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2202.843454][ T8626] bond1458: (slave bridge1353): Enslaving as an active interface with an up link 10:42:18 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", 
@ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0x7}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2202.870327][ T8648] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 10:42:19 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='hugetlb.1GB.usage_in_bytes\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='hugetlb.1GB.usage_in_bytes\x00', 0x275a, 0x0) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) 10:42:19 executing program 2: socket$nl_route(0x10, 0x3, 0x0) (async) r0 = socket$nl_route(0x10, 0x3, 0x0) sendmsg$nl_route(r0, &(0x7f0000000100)={0x0, 0x0, &(0x7f00000000c0)={&(0x7f0000000040)=@ipv4_newroute={0x1c, 0x18, 0x1, 0x0, 0x0, {0x2, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf}}, 0x1c}}, 0x0) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r1, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2203.131130][ T8648] 8021q: adding VLAN 0 to HW filter on device bond858 [ 2203.154368][ T8665] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2203.210442][ T8646] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 2203.230464][ T8671] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further 10:42:19 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) r1 = accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r2 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r2, 0x0, 0x0) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(0xffffffffffffffff, 0xd000943d, &(0x7f00000745c0)={0x0, [], 0x0, "7464fbe08eb369"}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(r1, 0xd000943d, &(0x7f0000050080)={0x8000000000000000, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, 0x0}], 0x80, "7f3bad9a41af7b"}) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(0xffffffffffffffff, 0x81f8943c, &(0x7f0000000100)={0x0, ""/256, 0x0, 0x0}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(0xffffffffffffffff, 0xd000943d, &(0x7f00000745c0)={0x0, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r4}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r5}], 0x0, "7464fbe08eb369"}) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(0xffffffffffffffff, 0x81f8943c, &(0x7f0000000100)={0x0, 
""/256, 0x0, 0x0}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(0xffffffffffffffff, 0xd000943d, &(0x7f00000745c0)={0x0, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r6}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r7}], 0x0, "7464fbe08eb369"}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(r2, 0xd000943d, &(0x7f0000051080)={0x1, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, 0x0}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0}], 0x72, "50d84211be02a4"}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(r1, 0xd000943d, &(0x7f0000052080)={0xb454, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, 0x0}], 0x5, "023e71e1659deb"}) ioctl$BTRFS_IOC_TREE_SEARCH_V2(r1, 0xc0709411, &(0x7f0000053080)={{0x0, 0x8, 0x1, 0xffffffff, 0x5, 0xfff, 0x200, 0x3, 0x0, 0x1, 0x40, 0x2, 0xfffffffffffeffff, 0x7, 0x1f}, 0x8, [0x0]}) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(0xffffffffffffffff, 0x81f8943c, &(0x7f0000000100)={0x0, ""/256, 0x0, 0x0}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(0xffffffffffffffff, 0xd000943d, &(0x7f00000745c0)={0x0, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r13}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r14}], 0x0, "7464fbe08eb369"}) ioctl$BTRFS_IOC_INO_LOOKUP_USER(r2, 0xd000943e, &(0x7f0000053100)={0x0, 0x0, 
"8709b3548f1eb64ce95d6d0ba0ef38fbc672aca8612dbb22178a9df59e1c1141098189a7109d63f84e3937aabbdfe0b1b52cd1cb8ba62dddb252dde54e268e36b92f0c724694196bba9637007ab95224bff3bd527550df659dd891459469acee8b049f7c64093d34a54283306d0a24ed523e191b54e82ae5b902bc56d6b2ac74e74d5c5eec3e71b0af05e31c851cb9b43a0bd9bd8062aff918bfb944ef10ad6cbece6f2422bba558a88a2ba9f8274d2dd6516d89e77b1db643bcaa439cb0a9e592f21f5d66b80f88b94b5eb2493eaa53557718fc237c999f26334ebff2621e25cf259f9e23947a5700c265b9ca8f819400857b1be3ea29256922772dff33b106", "9ba14b323cf103a12a354cf3f6036eb4085410964932d1b45912363452387fd0c7a67466e79ee3598432b5430f04482b1144861bac4deab1bf66fce3de878e76b4c5538c2b81996c4f6a06ad77d311d1350e1be9c1bb07d9d8f89d386ff16d1dc7e4189ed3180f8c9748978431465f7dc3f088f4f2cac2095d3d4a0fe28b544aeec612e4a6b42a59518b6551972c868a5b277a2c9fc840d6b2d15c5bf4943fce8c7d8e49f98a2b8a360922989cf375fcdfb70923693e34516dac5c5bebc4958812307fe457ef1bcdbff1db08d0237d1690c3fd9e73796a1524e06d402145ca6510219ad950a4fe1a7f33e777bda03198360c9b0e4dd5f40d6b4330e8c84e7bf10fcd8513e3031be6935666b73abd933527cbf8c776cea2d62b836a3a44f9ba6d27e6003e958559059dec4f8870f941390885aa5ece3f675ca2a2f8314687fe5ffcf7a3a9a84f2a04fec29403740c649247de911aac67effc0f488377358187656154c467d21e281279c73199fee29e167ef4a03ab65db6052ab35ad88eb2d0068e605480432b026cc53c1dfcc50e5c20d695f4ef97f89110a8781a9b0ac738f9d0ef41e5e7350041506c24801be8519143dc5df4e7d21b711f9fb61123a093f261e8f43ec8abc265be78d14aae2adeb1dcfa5a659d59ac71a3729e4ff01e80db03ce35ad4e785c879a419a227b2c1f88e673d094f2105e430a90e154368c1055baf1b2f4b7d7019e0593915dbe19bb04d88aeb3c569fae5e8343004c492dddf09bdfdb8081c22c541f692dbe90bb11b790a3809f64c9d318f7ff0a75579699df5c9dff025e90a5212732248c32cc021943e620c912cbc7570432739b935dd3ff339311ce0caf091a315b3b7a7aa3358fba72eb0e9aa3d38225b0fc83d17eb266bdf35a88d999b73a38dff769ddd6dde24b147c00ce2dddc1d2c120fc84538510e529221f713065e17aae26916f1c986e08ecd902a0ef0ed839eea49372890518765c7a310df75eb3b9b624e758821e57e3e9dde7098ab51fa20988b3d50274cc77efdf0119c8b3fdfd08c4357c77a8db037efbd4768f4f0f58f69c8eb6bafaab0675bcca1e1d9b954c2c287d48ac89ef0dc3151b32eb296f4a61ef7d2154a1ce35586f422b726d6e505e4bd16fc7436db4987e4efaebd751358daa4e035b2122d82b7cd14463e3534cdf77c816892ed0ecf8ab4dd8529e1065571aecdbadbd12e6c8f904cc2eb25aae6be9ae582792fbcb2c5a889caf85349d9c967f7335105c76bab18193f3a9706f75386c7d13b7baeb685129c740f659dd30113ef0db36d9d9efbd254ff931dc8467c59508e8e5cdef7744ce87612d7429fe265b12f781fac3cc54ac21c90288a06b87a375109f840c1697308fc3fc8866138fb2992889acb8376a20120bf77b5cc0064dc231224b616bdf76f9abd85b7617a4cefcb47079cb4563e30a76d0a07b7c77b776074c3e876369a49c3a8ebb35023e4c1ddb9eead70202ddc7f551e8e71cac9da7ff057ec990bd0d9bd8e410e0efae3beff0a3473ce946ccc657737292cfaf267b22e40795a9bf6fe1a3a6ede42d34ad5426dfe9c19eec6224daf1a3a3f7b5d90697b2969cd20550a47b8c359daeb733db36182edaa7addb4ac903a59ecb5a273a85eca1fff65a164a670ca17079eb8f0217f31458ea6aafba6c621d8c02bd430af3f479bffab8d0f01650360ef80259f3dbe819600045507d907b5ed06578a1fe48873c6caedaee50f54c27389cef592128775c77679f70ca46fe34685d7c6c89770bd4c03437384df0cdca3c7dbc8aa148420874838b50a75f8c530a9ca814eb95237865a44704d3184e0a8e2010e9407b1fc39b0194769bfbed487159ea96cf3b3381d4b5e90b687d15f3f26fd081a01ccc7d6da891d7d54fd5e119a4242ab297cca7c538bebc5b1667cc682f5d65e076cdea7c5273de8c7c38f21a0caa7c59dd15ecadd6b91f4e4e493b059274d62ea765e7ef0b1531c311e9dd94e97f27fb1fa4ccc759f5a8c7221a393249195e835ba8c4cc7cc89f792dbb5880e742011930e7df0c03384302587dc775a6ef98f1c805db42b9fe85fea271fea1f8a65b8086d7b446ba89e20003c518fa67e5ecd60fa4fdf71d2f6ee1f966bb9d71086d6cd9e3
62c4c89066b1c0a0db65799c6d48bf50e5c37e5987317fcc51310112c4738715327fb85212b1d4d4b65130f7bb0b6c8fe645b95069cdd5a1ee7515c386fae0ff2cda3232233a9883545b010b83bf58e7ebb83ed5fc165ff871b4f8bd92b3c2672de94c1f8b82aeaa4bf8bd7321df231097ec281cc8322cb6e348dfe7d46c2db5554ee5f610191b386df55958c758f8c8edefafb481149f363058b1301cd633262e78ac19b009a721c9cc43256e179e67e1b60d26bc003a94ad08e170958067633e6d8cecaa421f1b9f85b5959627f819a97df9665c1df3fb21bf2a80f52fa35ea1ad480aa2008bd80ead582c829198b4b15bf395d4ab04a864e3558e2071c2b0a4f4513379d5ed9ad63588000b845fed3b4d2f784ad313de2cde22bda6603c3810e07c4e499e82b5db49345a06a05ca07532c509cfa22c33a41ccad1f50392a8fb951fa8a05f1afdda8b646fe4ae713d779479a3fc6a38ccecb2d86e94faca2da81ff07723ede32935ef70835edaf6cbdcb1759dc6ea0f02b5e8a1d15eb6039c763c342defe18136440359c928508cf05056e35c920b0763a9c6dfef28038b799ca2d9ba1dff299328a02833d039a2b193d70983ee00df893a6ef16dd706e4452c1975ca82d26706fddc6647500b4e52df90e16b78277a1136ab3032ef743de5c7803718b5ab662f00b43301ae9d958b4dadc279cdb4e9696cf72d7f27a236647b0b519c5cf6c63ddbadfdb3117c1a877c21bdf0aca790461bbd778a193c6c3ef93f85737aa251db53d3cebbae6f65187bcadf014dec0f8156d4f090db8c7711e630bca709d368452e0b01b6a704a8909f19c58d899ac5d59a4b94d1dbbe92d4024024e054bd0b1432654e45b6534bb0f1267977a9a3b9355b370235f7e5f0284b9503874f9bbec654e6df5dddcb46eae1308d9dfac58270ac5ac6e0c40850b9ca838907962a8e5bbfabd263af81eb7732da78d5d07df79b47215dbbe793a0d09adff694069daa2b81f8e1124cfb177b3155363dfcae894a4fddc33ab37dc0a3a1c587f185935fe8078f04f88ae73c2ad47634b92d55e29787b1e450d8e2c3c303fac6738374d5b335b01c9b96c60a171b89310e9c978b7345bd1d923479df58f4090d6733ccc22fccd0520eb1125615bcf3a6e2906e9d729a7e53c3eb51ce07c6c3d48101d9c41d2eaf0173227340296ddac690520d7ccc29839a22286ab54d85f14d3f6377349b0b234c9f563aabcd2132b86eb4689c37f25522f5a6f28c5c906d75c3152b59957e909208d55486665b109fd32a7ccf62e21eb7d4617074427f00d82efc15c25dfc10c9ab8f3645c74d43268671cef77b0e9819665e7aae949622529cc3b4a72c8bb8a4bc96dd5f7d810f7dbd7e169f601323ca40c950978eb86582261f7ee5cd4ea8a6ec8a406e11fa842119fc8cf708afe77e2cfb1661d8ccad3052b857e80a38afe5b5897a93579b0d7eb57363fd1021271df4a27a0c36309053b4e1039d8850ab046e9e82cbbfe9d575868eaedef9dbfc024182eef71d95f40c5b8df79d9e26ce7f026181dd1cb5883da136eb4231155b2e291658d8bc35be772286184a577a2637ab9e42f1326e42e2fae6f4be4f5983ad65dbc279a50ca6a533110543ef0bc010d0640b0b07266f6ec2af24adaf696f206d944d656d9617b7a9a692c1fd6ee4d45fd90901135b448e62c8c1eb5da890e2a56a210b480871237b447ce329e05cded0baaa2aa0ba109afb3f7ce903638ded9ca28606aa128597cc593ddcf346f380a80b0f3a79be45037deba2ef912815896bf4dbf93894838bb8448be8b7b4814a2dade78d94b3458301cfaff3302c966acc482a4b2d3791a185531e8f7f0903396af0b9f63d8c67ce50626a4eec6514e6b8ec5e256c8470c7fa1aed96e588dff58d29bf19cc93caad39b06f8f24500b1f58e42545e0167bfcc0150c9e78e131f7c5ef206ff34566cc13ba118c901598b96b257d9ae63b7ca915c6a9238a20ac915f7731d83699b28936a1af77dd6e5e84fc223213d1cbfaf1709b7442db5f6ae342e0d577d2485ca70ab6235fea810806fdca8e568605a884f2dafc5df681f42f38df16ee2c7ac0e0d7e3e0b4d67e035184c34706ba0d30d3af2798126893f695c3f028ec61bab258c24828a0bf12fec8e2187f51b97a97730093aeffeba2f1c018a23aa2477cbb09fd873b06f64fcee8a8a85c7b24b606e147b677dc7ee8d15200aaa8a7cd3db44f209bf37ea2a3d34302ba8847afa30f8dedcf48379d950c0071f85b7020f1a68ac38711f646cc060cf0a48a98e67ac41c6591997ac8d435c0a8d3777ce0ee95317024131cc09fff269a40fb57a084991aad175799dc3c190657eef13d84bbb00238c4a4a2ff8722ee144e6f4e6d2aa50e162e3e4d9eacee60e67d004ec85a089b275bdea557378d93f99ab992ac91bf7f2e22017bd911b9b4a53fbbc1c5c9e7a26ae6984bb7792f5a0afb59bbfad31ff4f1dd26d0bf968fd4b58fce4e2c297
cf282d4d4487bd8b3632a1155968379725bff540a250ea3e75ea1eb1993acfe08f93bc27e0ca77b84962a39ad82aab9a0364d7a6a65e7532d9ea352d2fc3634510120c3a07a400bd56df587b052ba52d480a85d77e7c596247b4f11f40106f4a78c798fbed95fb21e77f44cf29304a37c5ece4e6fc8aac71734bad8311e4b2bfec6ec1584c9a65e3e0109d21ea5b3718f1a848fb39226c3d86916533639e30752e97d176644fff022d1f3b6145ec1f483ab1d255b892adf7caf17d3673be238d8debe86be639daacb46450bc2e1654456c9b133fa5687892f52831a09add8135dd23940ae929071cb24d922daa1fc4272fd69140a236c640b8ed23cd39054ad5d5596a155043e1e1b8bf7e8a39f19261aec8ef716b0a7e6bbf544008828dedf217c1222b3f5a717aae4283042a0995582fbd43820748e9cabbd764027dafc6b9978192e2e40e3eaa4b161ea598c0a66080029b9d5080d36a1985d1e74120c6cab96c9d0fd36f9c35e0694cf62ae7b9e68a71f288faa7c517337192fef3910c0ad7ae50a55661879b0bfdd165356d5510c457c63f203b177b13fabb17aebe267c0b0b6770e5636f907db249e9be6ddf29540b8c54f03d90bf093ca4e26344f677f46322f1ba15631eb6d53a26b4eb5a6543e7058018637d26a7419bc88a2cffb75c7edc450baf586b0c9927e71a931712e0bea91da8acea9782213cb605e5dad45abc20db609aa8e521fdd418b40bbfa"}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(r0, 0xd000943d, &(0x7f0000054100)={0x2, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r9}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r10}], 0x7f, "8eafcf6cc17ecc"}) ioctl$BTRFS_IOC_INO_LOOKUP_USER(r2, 0xd000943e, &(0x7f0000055100)={0x0, 0x0, "5e16ccde4c1a08bdba84ee91790e49783f0a7478a54e551ac65f9b3d31f9ffc7af2e0e6033f94e15d2d853c261dd082ec6442c5f52b582a8ffc046d3339fa1e265b27ab800c6665b61206a63130f7c6c460f31c652a6397d83af950061237d8d6efea2ca170646955b70f7640f43bbbe6e47574dd28d0b5e733a2bde20df898dfaf3dcacae855b372c53d3b1925424c99557f1d1783c8615f1aacab2dd5c98e375fe57cd9dc7ad19f8c2df4e304e2f7aa98c594d0c5543df5657302cdd19f5792e0997467768d12ce18351bf510906fb7354d8f11e8acdc5aae07834afdc35f92f47c6c7ee6ec6e2266da6b800fdf6a0752abc094c76e97b8525801d2975a9e6", 
"a613d3a620061e41ecc6d22192027f0600f13acdcf0b61975da21f83d773dac58f251e09e7c7b8c3b703965a04e8c69277ef9b630dd7992b7434a1db50587e49085694fe751e78f942ebcaa72460ccb92db41b333faca28cb7ab7eff64f33a427a4c3a704958343769fd01e86c34fa145747c9c6c64f9625b52b3bccdb905fa4da0bfcdcfb2f6c0cbe00234c4cea132f63bc3729cb21510268e865d8d3da84f71b0eab1415931d3da93c0643a01e63fa7031d97a42244b9560b1f926ab12348e4b903e93bcd293714ba11fac07e858272018e3a7bf9d6de0f682287e41a604935ebb1f33886e6ed3e6977de8650d40c1cf774da3a2d50ac01100f9cc584c2c8c693fb7283e25db7c1684682cab8ef2a61876c691b3c32ec3415dffda1c7c91eefd5c665b0dc89a5e9710477550328b842fecb3ced95488369019c987d4e2237a79f0f810f7ac1bde623e56d93a06f6945ef709d8810fdc5fd049e1b5053b2caa0547bcd8eec49a3305c5f5f63f6f68b2e8c9a15437a8f4726862fcc9989a425a475196de989ea259869233699320bcbff7908da4dcbc763f3f5da206b1fe65cb2f2a23938aec8bb4167865d11ee30c4b4ac1472898c4291e9fccb994b29ec12d1176776fd6b31cb0a4a36db894f2698160d89c9c07f7fd71912a2aaca6fac504d6a5731ad413701f734ba7f2139898d50c9b413524333821b0432f605e63640e5f9724679b64b558f0602f7475e5a57998be83977f95560859307ae15bb526391d8048f2733715ed144a63fa4d7154820c882a2c06ed41cf0d744640a498f952739db395f446fa40378d106afcff9f2cc6f0d973951d2c3807d7e83c79a4bd9da7319d29dea051c926a26b74b784412d9c40148ce44f5bf8d62549cc6a1a34f8d806d2891cef6cab75ffbd01b15e2dba49e2b4a3b632ca178e286e84c21169c5158b8e02c355b5b82a0a04c6667eb7927f4b5e623f2d7942609477c9c55925869785329181ccfc921ca0b425c8acb21874db3c3ebf6dd9d2dcdf6f4948f497448c5db597cc7303bb72b5767796ba2a28c00dce3f3b2515d808d9247acc9890b3e4b8af57c84271e15db787d07fc15dd43f3912678be6f9150aea86da86a973f00ad7523f691307a2a979dd81cab3dc47520028088fe1180c703cc8a4a15205b33e2cbbb39a74c027668b5dac4de55ac8c9c040556bfe070c41305a69b6298b086c7e181799709943bd6f0494cb6c4ad6146dc351ed0c639226257c87f049723f8a039b21557eaef3091d9a9afef59f63e48cd5acdda426fdc53f8791e4d5d8a7aa41f3ac4fd2102c56e402a88ab9a6c8bd1f641012a0b585020cbf0a019328bdda3c234c27da617f08558ab7392e0c0d4f39d96a3a763e65f5186a4dc00b06fa690bbad5dd62c760640f49036904b38ed51a02915342b3421915bc2876fb855a53e4a1592bc6da2b506eab435dd8e4db752be0481cf16110ae164b396e40ef6f4bb6a8e81ba8979eb877a21bcdf02bd4c9a6745a36678499cdd425847c8fb8b2edc61bfc1c00018b58172edb3d3928124e7f61e0e7d296716b96ecbb0fb2f3b95fee28dbba2057f76c70a4cf4e5ca323c7f01598adef78f37337506049f2a668575d5f368803fa77dc8a20caf945cb7fc7b42fa20f964ebc67505e3f32f84712019722c9f2d8e55e8bd5a9849083019f9784b588c97c0b17043e217b4d48564dfa5a4842b06369db286d81768f43c1c5fdeec8079c82d238a45121f4d26d27e41422c2f65aa622b464b2191cabedc550dd1eae7107a8795589d3940c14d2cac91a631dc5e609fbde4d1ddc22a62d8e05a5b146e087a4093bc0c838709c825f7b60a8bd621418847db1633c14e52158644b87f56856f57a737e28c0ea0f6435b8317e3e40ce3a23f74e444bbc3ea6b879c0bc05562231d90a647c91fb74ceed0ccafed62a2358b4f328f2676b7d6d8732f27156b3c2b33561e2f34b2b79c24cadfa192f7fabb592226c50b91b142539f8697f65ef18a00f6ab4a42fc21cfef80aab8268c917d0bdb548c5fcab248dc011d341ed8dc8f9e4268fe53e60ad1b8b6e1db09c4ef963c0c88709d2ede8971f6bbabdac364f13f68f00a4f471bf01824bc898101d864b75dc563eeb20ea72d10b71e9cea09a81a4e7f3ec44fdf184947092d1cbfe4318d4d03a8b7b1fc5ba9b87e87e7330659b8efdea0c28ddb6cfea6841c9fe6dbae82312904a80719fa890afb53a12f7d8537d7a961bb0fc1fec1cc26f055dd2ca1d55df83c82b29efc2f20ad233109183817d3c0c0b6cc1b8c3010b5d6215301dfd5d2a780fbfefd358a96238ab46f31c8468620a742bd6ab6ddcd2e99b5a715c5952eed8840e181ca59616546f7366e7304e5ff692d535aaccd826bf847b81ef192c6881e2806dfc451047890c634f9cad41294c34810c5e174620fffb1da7ae1ff1519734b58dbc503f525e33484a3e72573300d7691be08e0076c81af43539498ad219216eedb68b814900
f73b20b3d408dd2db47e1e183184d2a393b3c17880df2fbd9d70d480f3232bf44759957a4f30d90e5c07c9671b0895e230c02393cf77b66a6b82b7217794e7570ad19af08d20c764b23f53dfc68da5d277306c9efb07e22cb0385be9e53dfae11d76657a4473deb217d440fcba8b0c6df0f9f69b7f5fca5ae82d13ef47c8a6bca4429dd427381df3b0805b7233d4090bf50d5a0ca0021ac5468910f704d1285adf04b067d6ccd117996f1d7d515dc89492b041a10b289da3414f13dc624699e2c4da9eb3c82391e413def43be951da45603786a750a16a1bb3a0ff79e54a399156afe05be380d344e25f2e4f09fd3a6c8eb2f4ffd094c484baf37e411a8769f52d090dd648b33e7748e38becaf952ccc68c152f0718a50a32960a2acd942581491207bba5da76e14fcfd1df00c6141dc919652f83a18b7e3e14aef608687c3538e6610368a5e785b5fdd42b9a4c25d80682857e6666a760650d32ac5b0d2fe8fa017cbed42e2fbce70ad29df7e55cfcbac1bba7d4f14ae6a2e3699fb9841738ae1205f92297c4ad682c6e4e3daaf0a6fba12fd86f839c8687322200402e389431dece6ad8a1a98367643a8fd14c7412db7b1cecebcb6509943c35fdf6f5b2ffba52433c31e4834a8100308d6dd6da4fbf8f4f89b11f29ba1a402b24de54c89868b53530b10b2abdf46894b6c56b834d5ecc80b39aed2595852fb7bd4e61f51a9dff599254e308a5659f09d4b1a4146ee71aff92602b13521f7b5dfd0fcf61f3930b75e00329ad6852436bebaf3f942533139ce9bacae3fa2c98ab96d2f5ebf7425dbd4c5506e188f4dfd2a7e61fd4fdd48c3a05ac15c7899c94d94ce3aedc355bc88dc2cee43972b846fec5b4a8376914262904bcc71a0983f452e1c03aa4a1625419609ae29230a71f79eb5032d23a44b05b42e09e0e7b65939e85b78c207144155028ba7bf9d7ba8c701e691a8b501738a8ae1b56c47486ed12c8e3a0ea85fa41d2c13429b68676d84bf196b7cc820f59bbfb322e8a21dd623f34f9f842a483e96c91fd32fe54c22ff74e521ee577f93e660f30e2a78f2685781503e7e1213d99d2fe2067feaf2c86f78cc31aeb4e4592196a44decd3cca66ccc0091b3240ac90f63c149939548ffecf1c921d73b7f70b65faea19db6892523afa4052c8b98021194b21b80f22102f8d9c26a6aca4d2719daa55914f3d382cb3e4405caf27ce75599e90d00d3c17c553a446366ba2b160f794db63fe6c18b271bc59ea29707b2be1cedf33b869d955c353b7e4586dbac773ad121650c05546158243cc681b20b5c3b3e8c823c47939cf28c1ed5436f3c983b68f51ec6ee4ed28db2b98f3756193f77cdc4b03bb7f697b1abe409da547b5f9b7c27f005969d0780585bde07aa40542ee9306dbe25e4bb06cdffa9f4e45b3b1cc1599c98596e420477c8ab32756b7a94ea0ea660ee351155951780accac4a86beb9856182eb5284fe82861aa59088de4b4ec8295e3d0ae43aa6d36af1cffc1ab74b3e534394ab139d616ab5dc7d3c3c26f614d7dfc127190065c390277600e4b428574d8441d7eccc0e1bc13aa43c03f29d5e36b4dc31f1659854bb8642ce07b9c9a4321cb775c25363a83a0eae3750face88cc50bf749e10ebfe1f6c9408059c99b32de50274ec965993ebaabb6813d8a403af459027fdfb9218ba4c976abb6fd9a051b0595286a2d7ae12afff818c52aac8d494a5d3a2ed7352f207ab8a98b443e65487afd25b4b5552f20ff96116849a54bfa778e3bbc83857f8353aff48238ca1823185fcc167e8f07be353a5dc7c381f71741206f29951f7eada6fff65c495755efcec0629f8fa4c0df50e7f1bbae8701be86b27542ba64f1819ff5c841079c8fa06b07c3fee32f987eedb2c9e17ea399325739bf975bc56e2af1ad33b7d09ada3fe55493c2cf856f4cdc1590f152c17067a1b4964afe6e96a16dccd9658c15a9b011ba901fa80da95681748fa2c5302f8b21b76826470e760311a14223b14a9f16eb2fa7eee63b4a7af8dad53375498d54e049011134cf648ce1c59127969e330d762441c3e58414088c1e1094c4fd6ec6985bcf6cce93340d410e72667c6a934ed4717800cd7acc37d238d5967e80c502ded7326032fadb0acd77d0e38aaeb1504fd9e660f94a06318eafeb330fba065fb7b452583554d276face76c6bce147e56514ad22e22457d4c6e577c321f9920de666710d84997e155aaf836de39e8c985c8e32cd9d996b9bc1f323b271755bb584260e90fe8c247dd929cf8a3a1b3cf2a0993d366007f7e3966c411ec577fac875f7a929da4f32391184817b8aa9da9e1cc9037e725f18efaafbb08d520faa8c80c0fd3a8ddebcc655431f0456ab6569d6d3f13f3cf36c3ed6d464b185ffc79b285dc0ae4a1f3dc2db072618b9ea21d2725e15c1f3f1e30fcfa204bafd7a0980d0963b9a49303d79356c9aac2e071f393df48106a751341ff0b61e1e4f092f9e461e5387bfae709578539
19e32b15e152a2da097fa70b486c14fb45a23c5a20b53ceb9a08285efd0a73a65e8cd14d50cf5e01e7fd3177ed26f1394a0734a2a9242a04285571245d18d6f70bb0287420d0a0b1a2662b3b97dab6c18cc132f990426974bb6531c1dbe4ab6755c7c89987314513d8d1f054b5ab737f0920c32327d1310c2d256dfb49ee38dc98b751b67c988d79638cd9a2f822102402f028b676051920bbee264e0b7e4723cb82ae1af169443bfe16a265daa791e249da8efddbca2956b4c04aa1e5eeb0a8c354b5f3fadefbc1f8f33b96c19d76bf879d92951c38608a9142f899250dba80791d0a62a1b3b60b5e3a8f387bfedc40f60dc5e9341f0a51bc8151e5445c132dad069ee1f1bcded0c3ac2092969"}) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(r1, 0x81f8943c, &(0x7f0000056100)={0x0, ""/256, 0x0}) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(0xffffffffffffffff, 0x81f8943c, &(0x7f0000000100)={0x0, ""/256, 0x0, 0x0}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(0xffffffffffffffff, 0xd000943d, &(0x7f00000745c0)={0x0, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r19}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r20}], 0x0, "7464fbe08eb369"}) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(0xffffffffffffffff, 0x81f8943c, &(0x7f0000000100)={0x0, ""/256, 0x0, 0x0}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(0xffffffffffffffff, 0xd000943d, &(0x7f00000745c0)={0x0, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r21}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r22}], 0x0, "7464fbe08eb369"}) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(0xffffffffffffffff, 0x81f8943c, &(0x7f0000000100)={0x0, ""/256, 0x0, 0x0}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(0xffffffffffffffff, 0xd000943d, &(0x7f00000745c0)={0x0, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r23}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r24}], 0x0, "7464fbe08eb369"}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(r1, 0xd000943d, &(0x7f0000056300)={0xa2f8, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, 
{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r3}, {r4, r7}, {0x0, r8}, {0x0, r11}, {r12, r14}, {r15}, {r16}, {r17}, {r18}, {r19}, {r21}, {r23}], 0x5, "0467da414c43ce"}) 10:42:19 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='hugetlb.1GB.usage_in_bytes\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2203.260902][ T8646] workqueue: Failed to create a rescuer kthread for wq "bond1422": -EINTR [ 2203.390790][ T8656] bond858: (slave bridge1013): making interface the new active one 10:42:19 executing program 5: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) writev(r0, &(0x7f0000000340)=[{&(0x7f0000000280)="043caa07f3a3f76d858ace87d769ded7550278376e8c4f6550d9108cbf5189deea33e10b8a7224c6107d53d01f904f58c8e83e60c4192c5a7fbd1a5f9c9fb63ca8dddee8d195355273926702d073752d80d8545e667b5c485a", 0x59}, {&(0x7f0000000100)="e48c29435b087869336f3e74363df6ea882508c8aa487ff564e800ccd724f50e3896c5d551b1a2ec6bb29187299fe2e06c11dfcb2075f9a839b34a4a6b4c2bec", 0x40}, {&(0x7f0000000400)="a318ece45becd6e47ff8375b816640bce6a9ff4d8e83ab791404d351c3ba957f2f3a0220176c17e899248cf03c8a908c4ade8de15d81e79b803df97f6fe5e817c2efde3befe034a7bc6d52a0c1caf88dc7d2077e40879876ce4aa8d6273615939a97526b8cb409d0c58f8075db16b3aed80bd44efb660be17d9db07a801a43b59a034e9998a5d60b072ab0fe5dd97a86e25568ab6d9d74c7af31171f38eaff0cb0fc24300b48212ec6608ef820fc964a28db4cd8d0526d550efac04bc60f4e4cd3c224cbe0ba45f28e0566930afbca59424e39854793c6e301fdc9cedeb00500db4af5f6c2004568aa4682fc656b1943bb63", 0xf2}, {&(0x7f0000000500)="a1a8c7128cf53b860b5aaa1ccee4ffe846420e0192cfed6fed4eeddf537179b0a83627c3868f3b78b60a61263a90957f69337b2ab0a123218ed0318185135c095b602df299adfc458eef4947ab34782ecdbae8436486efca9d9076f2316af18200a3014efba92706cc1748f7c5925bdaa3aa4516b6f1a6f22f4f1c23d2554620b90ad856298d3087e4519d78d14dd2d1dbb2401cfd330b19ac2fc26b2b4837023a585b846cdc0e007773719a6ad0cf3acb3cde81220e1909741522d75d5c49222f1d0ef6d8bd1b58070df04e658553cb734adbdb98c8482330ed25a1d53b5b3a6c09aa436715f3535c84a943", 0xec}], 0x4) accept(0xffffffffffffffff, 0x0, &(0x7f0000000080)) socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x0, 0x803, 0x0) syz_genetlink_get_family_id$mptcp(&(0x7f0000000000), r1) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r3, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r5, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0xffffff7f}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x3c}}, 0x0) [ 2203.464095][ 
T8656] bond858: (slave bridge1013): Enslaving as an active interface with an up link [ 2203.473383][ T8681] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further 10:42:19 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) (async) mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) r1 = accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) (async) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) (async) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r2 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r2, 0x0, 0x0) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(0xffffffffffffffff, 0xd000943d, &(0x7f00000745c0)={0x0, [], 0x0, "7464fbe08eb369"}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(r1, 0xd000943d, &(0x7f0000050080)={0x8000000000000000, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, 0x0}], 0x80, "7f3bad9a41af7b"}) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(0xffffffffffffffff, 0x81f8943c, &(0x7f0000000100)) (async) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(0xffffffffffffffff, 0x81f8943c, &(0x7f0000000100)={0x0, ""/256, 0x0, 0x0}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(0xffffffffffffffff, 0xd000943d, &(0x7f00000745c0)={0x0, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r4}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r5}], 0x0, "7464fbe08eb369"}) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(0xffffffffffffffff, 0x81f8943c, &(0x7f0000000100)={0x0, ""/256, 0x0, 0x0}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(0xffffffffffffffff, 0xd000943d, &(0x7f00000745c0)={0x0, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r6}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r7}], 0x0, "7464fbe08eb369"}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(r2, 0xd000943d, &(0x7f0000051080)={0x1, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, 0x0}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0}, {}, 
{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0}], 0x72, "50d84211be02a4"}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(r1, 0xd000943d, &(0x7f0000052080)={0xb454, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, 0x0}], 0x5, "023e71e1659deb"}) ioctl$BTRFS_IOC_TREE_SEARCH_V2(r1, 0xc0709411, &(0x7f0000053080)={{0x0, 0x8, 0x1, 0xffffffff, 0x5, 0xfff, 0x200, 0x3, 0x0, 0x1, 0x40, 0x2, 0xfffffffffffeffff, 0x7, 0x1f}, 0x8, [0x0]}) (async) ioctl$BTRFS_IOC_TREE_SEARCH_V2(r1, 0xc0709411, &(0x7f0000053080)={{0x0, 0x8, 0x1, 0xffffffff, 0x5, 0xfff, 0x200, 0x3, 0x0, 0x1, 0x40, 0x2, 0xfffffffffffeffff, 0x7, 0x1f}, 0x8, [0x0]}) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(0xffffffffffffffff, 0x81f8943c, &(0x7f0000000100)) (async) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(0xffffffffffffffff, 0x81f8943c, &(0x7f0000000100)={0x0, ""/256, 0x0, 0x0}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(0xffffffffffffffff, 0xd000943d, &(0x7f00000745c0)={0x0, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r13}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r14}], 0x0, "7464fbe08eb369"}) ioctl$BTRFS_IOC_INO_LOOKUP_USER(r2, 0xd000943e, &(0x7f0000053100)={0x0, 0x0, "8709b3548f1eb64ce95d6d0ba0ef38fbc672aca8612dbb22178a9df59e1c1141098189a7109d63f84e3937aabbdfe0b1b52cd1cb8ba62dddb252dde54e268e36b92f0c724694196bba9637007ab95224bff3bd527550df659dd891459469acee8b049f7c64093d34a54283306d0a24ed523e191b54e82ae5b902bc56d6b2ac74e74d5c5eec3e71b0af05e31c851cb9b43a0bd9bd8062aff918bfb944ef10ad6cbece6f2422bba558a88a2ba9f8274d2dd6516d89e77b1db643bcaa439cb0a9e592f21f5d66b80f88b94b5eb2493eaa53557718fc237c999f26334ebff2621e25cf259f9e23947a5700c265b9ca8f819400857b1be3ea29256922772dff33b106", 
"9ba14b323cf103a12a354cf3f6036eb4085410964932d1b45912363452387fd0c7a67466e79ee3598432b5430f04482b1144861bac4deab1bf66fce3de878e76b4c5538c2b81996c4f6a06ad77d311d1350e1be9c1bb07d9d8f89d386ff16d1dc7e4189ed3180f8c9748978431465f7dc3f088f4f2cac2095d3d4a0fe28b544aeec612e4a6b42a59518b6551972c868a5b277a2c9fc840d6b2d15c5bf4943fce8c7d8e49f98a2b8a360922989cf375fcdfb70923693e34516dac5c5bebc4958812307fe457ef1bcdbff1db08d0237d1690c3fd9e73796a1524e06d402145ca6510219ad950a4fe1a7f33e777bda03198360c9b0e4dd5f40d6b4330e8c84e7bf10fcd8513e3031be6935666b73abd933527cbf8c776cea2d62b836a3a44f9ba6d27e6003e958559059dec4f8870f941390885aa5ece3f675ca2a2f8314687fe5ffcf7a3a9a84f2a04fec29403740c649247de911aac67effc0f488377358187656154c467d21e281279c73199fee29e167ef4a03ab65db6052ab35ad88eb2d0068e605480432b026cc53c1dfcc50e5c20d695f4ef97f89110a8781a9b0ac738f9d0ef41e5e7350041506c24801be8519143dc5df4e7d21b711f9fb61123a093f261e8f43ec8abc265be78d14aae2adeb1dcfa5a659d59ac71a3729e4ff01e80db03ce35ad4e785c879a419a227b2c1f88e673d094f2105e430a90e154368c1055baf1b2f4b7d7019e0593915dbe19bb04d88aeb3c569fae5e8343004c492dddf09bdfdb8081c22c541f692dbe90bb11b790a3809f64c9d318f7ff0a75579699df5c9dff025e90a5212732248c32cc021943e620c912cbc7570432739b935dd3ff339311ce0caf091a315b3b7a7aa3358fba72eb0e9aa3d38225b0fc83d17eb266bdf35a88d999b73a38dff769ddd6dde24b147c00ce2dddc1d2c120fc84538510e529221f713065e17aae26916f1c986e08ecd902a0ef0ed839eea49372890518765c7a310df75eb3b9b624e758821e57e3e9dde7098ab51fa20988b3d50274cc77efdf0119c8b3fdfd08c4357c77a8db037efbd4768f4f0f58f69c8eb6bafaab0675bcca1e1d9b954c2c287d48ac89ef0dc3151b32eb296f4a61ef7d2154a1ce35586f422b726d6e505e4bd16fc7436db4987e4efaebd751358daa4e035b2122d82b7cd14463e3534cdf77c816892ed0ecf8ab4dd8529e1065571aecdbadbd12e6c8f904cc2eb25aae6be9ae582792fbcb2c5a889caf85349d9c967f7335105c76bab18193f3a9706f75386c7d13b7baeb685129c740f659dd30113ef0db36d9d9efbd254ff931dc8467c59508e8e5cdef7744ce87612d7429fe265b12f781fac3cc54ac21c90288a06b87a375109f840c1697308fc3fc8866138fb2992889acb8376a20120bf77b5cc0064dc231224b616bdf76f9abd85b7617a4cefcb47079cb4563e30a76d0a07b7c77b776074c3e876369a49c3a8ebb35023e4c1ddb9eead70202ddc7f551e8e71cac9da7ff057ec990bd0d9bd8e410e0efae3beff0a3473ce946ccc657737292cfaf267b22e40795a9bf6fe1a3a6ede42d34ad5426dfe9c19eec6224daf1a3a3f7b5d90697b2969cd20550a47b8c359daeb733db36182edaa7addb4ac903a59ecb5a273a85eca1fff65a164a670ca17079eb8f0217f31458ea6aafba6c621d8c02bd430af3f479bffab8d0f01650360ef80259f3dbe819600045507d907b5ed06578a1fe48873c6caedaee50f54c27389cef592128775c77679f70ca46fe34685d7c6c89770bd4c03437384df0cdca3c7dbc8aa148420874838b50a75f8c530a9ca814eb95237865a44704d3184e0a8e2010e9407b1fc39b0194769bfbed487159ea96cf3b3381d4b5e90b687d15f3f26fd081a01ccc7d6da891d7d54fd5e119a4242ab297cca7c538bebc5b1667cc682f5d65e076cdea7c5273de8c7c38f21a0caa7c59dd15ecadd6b91f4e4e493b059274d62ea765e7ef0b1531c311e9dd94e97f27fb1fa4ccc759f5a8c7221a393249195e835ba8c4cc7cc89f792dbb5880e742011930e7df0c03384302587dc775a6ef98f1c805db42b9fe85fea271fea1f8a65b8086d7b446ba89e20003c518fa67e5ecd60fa4fdf71d2f6ee1f966bb9d71086d6cd9e362c4c89066b1c0a0db65799c6d48bf50e5c37e5987317fcc51310112c4738715327fb85212b1d4d4b65130f7bb0b6c8fe645b95069cdd5a1ee7515c386fae0ff2cda3232233a9883545b010b83bf58e7ebb83ed5fc165ff871b4f8bd92b3c2672de94c1f8b82aeaa4bf8bd7321df231097ec281cc8322cb6e348dfe7d46c2db5554ee5f610191b386df55958c758f8c8edefafb481149f363058b1301cd633262e78ac19b009a721c9cc43256e179e67e1b60d26bc003a94ad08e170958067633e6d8cecaa421f1b9f85b5959627f819a97df9665c1df3fb21bf2a80f52fa35ea1ad480aa2008bd80ead582c829198b4b15bf395d4ab04a864e3558e2071c2b0a4f4
513379d5ed9ad63588000b845fed3b4d2f784ad313de2cde22bda6603c3810e07c4e499e82b5db49345a06a05ca07532c509cfa22c33a41ccad1f50392a8fb951fa8a05f1afdda8b646fe4ae713d779479a3fc6a38ccecb2d86e94faca2da81ff07723ede32935ef70835edaf6cbdcb1759dc6ea0f02b5e8a1d15eb6039c763c342defe18136440359c928508cf05056e35c920b0763a9c6dfef28038b799ca2d9ba1dff299328a02833d039a2b193d70983ee00df893a6ef16dd706e4452c1975ca82d26706fddc6647500b4e52df90e16b78277a1136ab3032ef743de5c7803718b5ab662f00b43301ae9d958b4dadc279cdb4e9696cf72d7f27a236647b0b519c5cf6c63ddbadfdb3117c1a877c21bdf0aca790461bbd778a193c6c3ef93f85737aa251db53d3cebbae6f65187bcadf014dec0f8156d4f090db8c7711e630bca709d368452e0b01b6a704a8909f19c58d899ac5d59a4b94d1dbbe92d4024024e054bd0b1432654e45b6534bb0f1267977a9a3b9355b370235f7e5f0284b9503874f9bbec654e6df5dddcb46eae1308d9dfac58270ac5ac6e0c40850b9ca838907962a8e5bbfabd263af81eb7732da78d5d07df79b47215dbbe793a0d09adff694069daa2b81f8e1124cfb177b3155363dfcae894a4fddc33ab37dc0a3a1c587f185935fe8078f04f88ae73c2ad47634b92d55e29787b1e450d8e2c3c303fac6738374d5b335b01c9b96c60a171b89310e9c978b7345bd1d923479df58f4090d6733ccc22fccd0520eb1125615bcf3a6e2906e9d729a7e53c3eb51ce07c6c3d48101d9c41d2eaf0173227340296ddac690520d7ccc29839a22286ab54d85f14d3f6377349b0b234c9f563aabcd2132b86eb4689c37f25522f5a6f28c5c906d75c3152b59957e909208d55486665b109fd32a7ccf62e21eb7d4617074427f00d82efc15c25dfc10c9ab8f3645c74d43268671cef77b0e9819665e7aae949622529cc3b4a72c8bb8a4bc96dd5f7d810f7dbd7e169f601323ca40c950978eb86582261f7ee5cd4ea8a6ec8a406e11fa842119fc8cf708afe77e2cfb1661d8ccad3052b857e80a38afe5b5897a93579b0d7eb57363fd1021271df4a27a0c36309053b4e1039d8850ab046e9e82cbbfe9d575868eaedef9dbfc024182eef71d95f40c5b8df79d9e26ce7f026181dd1cb5883da136eb4231155b2e291658d8bc35be772286184a577a2637ab9e42f1326e42e2fae6f4be4f5983ad65dbc279a50ca6a533110543ef0bc010d0640b0b07266f6ec2af24adaf696f206d944d656d9617b7a9a692c1fd6ee4d45fd90901135b448e62c8c1eb5da890e2a56a210b480871237b447ce329e05cded0baaa2aa0ba109afb3f7ce903638ded9ca28606aa128597cc593ddcf346f380a80b0f3a79be45037deba2ef912815896bf4dbf93894838bb8448be8b7b4814a2dade78d94b3458301cfaff3302c966acc482a4b2d3791a185531e8f7f0903396af0b9f63d8c67ce50626a4eec6514e6b8ec5e256c8470c7fa1aed96e588dff58d29bf19cc93caad39b06f8f24500b1f58e42545e0167bfcc0150c9e78e131f7c5ef206ff34566cc13ba118c901598b96b257d9ae63b7ca915c6a9238a20ac915f7731d83699b28936a1af77dd6e5e84fc223213d1cbfaf1709b7442db5f6ae342e0d577d2485ca70ab6235fea810806fdca8e568605a884f2dafc5df681f42f38df16ee2c7ac0e0d7e3e0b4d67e035184c34706ba0d30d3af2798126893f695c3f028ec61bab258c24828a0bf12fec8e2187f51b97a97730093aeffeba2f1c018a23aa2477cbb09fd873b06f64fcee8a8a85c7b24b606e147b677dc7ee8d15200aaa8a7cd3db44f209bf37ea2a3d34302ba8847afa30f8dedcf48379d950c0071f85b7020f1a68ac38711f646cc060cf0a48a98e67ac41c6591997ac8d435c0a8d3777ce0ee95317024131cc09fff269a40fb57a084991aad175799dc3c190657eef13d84bbb00238c4a4a2ff8722ee144e6f4e6d2aa50e162e3e4d9eacee60e67d004ec85a089b275bdea557378d93f99ab992ac91bf7f2e22017bd911b9b4a53fbbc1c5c9e7a26ae6984bb7792f5a0afb59bbfad31ff4f1dd26d0bf968fd4b58fce4e2c297cf282d4d4487bd8b3632a1155968379725bff540a250ea3e75ea1eb1993acfe08f93bc27e0ca77b84962a39ad82aab9a0364d7a6a65e7532d9ea352d2fc3634510120c3a07a400bd56df587b052ba52d480a85d77e7c596247b4f11f40106f4a78c798fbed95fb21e77f44cf29304a37c5ece4e6fc8aac71734bad8311e4b2bfec6ec1584c9a65e3e0109d21ea5b3718f1a848fb39226c3d86916533639e30752e97d176644fff022d1f3b6145ec1f483ab1d255b892adf7caf17d3673be238d8debe86be639daacb46450bc2e1654456c9b133fa5687892f52831a09add8135dd23940ae929071cb24d922daa1fc4272fd69140a236c640b8ed23cd39054ad5d559
6a155043e1e1b8bf7e8a39f19261aec8ef716b0a7e6bbf544008828dedf217c1222b3f5a717aae4283042a0995582fbd43820748e9cabbd764027dafc6b9978192e2e40e3eaa4b161ea598c0a66080029b9d5080d36a1985d1e74120c6cab96c9d0fd36f9c35e0694cf62ae7b9e68a71f288faa7c517337192fef3910c0ad7ae50a55661879b0bfdd165356d5510c457c63f203b177b13fabb17aebe267c0b0b6770e5636f907db249e9be6ddf29540b8c54f03d90bf093ca4e26344f677f46322f1ba15631eb6d53a26b4eb5a6543e7058018637d26a7419bc88a2cffb75c7edc450baf586b0c9927e71a931712e0bea91da8acea9782213cb605e5dad45abc20db609aa8e521fdd418b40bbfa"}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(r0, 0xd000943d, &(0x7f0000054100)={0x2, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r9}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r10}], 0x7f, "8eafcf6cc17ecc"}) ioctl$BTRFS_IOC_INO_LOOKUP_USER(r2, 0xd000943e, &(0x7f0000055100)={0x0, 0x0, "5e16ccde4c1a08bdba84ee91790e49783f0a7478a54e551ac65f9b3d31f9ffc7af2e0e6033f94e15d2d853c261dd082ec6442c5f52b582a8ffc046d3339fa1e265b27ab800c6665b61206a63130f7c6c460f31c652a6397d83af950061237d8d6efea2ca170646955b70f7640f43bbbe6e47574dd28d0b5e733a2bde20df898dfaf3dcacae855b372c53d3b1925424c99557f1d1783c8615f1aacab2dd5c98e375fe57cd9dc7ad19f8c2df4e304e2f7aa98c594d0c5543df5657302cdd19f5792e0997467768d12ce18351bf510906fb7354d8f11e8acdc5aae07834afdc35f92f47c6c7ee6ec6e2266da6b800fdf6a0752abc094c76e97b8525801d2975a9e6", 
"a613d3a620061e41ecc6d22192027f0600f13acdcf0b61975da21f83d773dac58f251e09e7c7b8c3b703965a04e8c69277ef9b630dd7992b7434a1db50587e49085694fe751e78f942ebcaa72460ccb92db41b333faca28cb7ab7eff64f33a427a4c3a704958343769fd01e86c34fa145747c9c6c64f9625b52b3bccdb905fa4da0bfcdcfb2f6c0cbe00234c4cea132f63bc3729cb21510268e865d8d3da84f71b0eab1415931d3da93c0643a01e63fa7031d97a42244b9560b1f926ab12348e4b903e93bcd293714ba11fac07e858272018e3a7bf9d6de0f682287e41a604935ebb1f33886e6ed3e6977de8650d40c1cf774da3a2d50ac01100f9cc584c2c8c693fb7283e25db7c1684682cab8ef2a61876c691b3c32ec3415dffda1c7c91eefd5c665b0dc89a5e9710477550328b842fecb3ced95488369019c987d4e2237a79f0f810f7ac1bde623e56d93a06f6945ef709d8810fdc5fd049e1b5053b2caa0547bcd8eec49a3305c5f5f63f6f68b2e8c9a15437a8f4726862fcc9989a425a475196de989ea259869233699320bcbff7908da4dcbc763f3f5da206b1fe65cb2f2a23938aec8bb4167865d11ee30c4b4ac1472898c4291e9fccb994b29ec12d1176776fd6b31cb0a4a36db894f2698160d89c9c07f7fd71912a2aaca6fac504d6a5731ad413701f734ba7f2139898d50c9b413524333821b0432f605e63640e5f9724679b64b558f0602f7475e5a57998be83977f95560859307ae15bb526391d8048f2733715ed144a63fa4d7154820c882a2c06ed41cf0d744640a498f952739db395f446fa40378d106afcff9f2cc6f0d973951d2c3807d7e83c79a4bd9da7319d29dea051c926a26b74b784412d9c40148ce44f5bf8d62549cc6a1a34f8d806d2891cef6cab75ffbd01b15e2dba49e2b4a3b632ca178e286e84c21169c5158b8e02c355b5b82a0a04c6667eb7927f4b5e623f2d7942609477c9c55925869785329181ccfc921ca0b425c8acb21874db3c3ebf6dd9d2dcdf6f4948f497448c5db597cc7303bb72b5767796ba2a28c00dce3f3b2515d808d9247acc9890b3e4b8af57c84271e15db787d07fc15dd43f3912678be6f9150aea86da86a973f00ad7523f691307a2a979dd81cab3dc47520028088fe1180c703cc8a4a15205b33e2cbbb39a74c027668b5dac4de55ac8c9c040556bfe070c41305a69b6298b086c7e181799709943bd6f0494cb6c4ad6146dc351ed0c639226257c87f049723f8a039b21557eaef3091d9a9afef59f63e48cd5acdda426fdc53f8791e4d5d8a7aa41f3ac4fd2102c56e402a88ab9a6c8bd1f641012a0b585020cbf0a019328bdda3c234c27da617f08558ab7392e0c0d4f39d96a3a763e65f5186a4dc00b06fa690bbad5dd62c760640f49036904b38ed51a02915342b3421915bc2876fb855a53e4a1592bc6da2b506eab435dd8e4db752be0481cf16110ae164b396e40ef6f4bb6a8e81ba8979eb877a21bcdf02bd4c9a6745a36678499cdd425847c8fb8b2edc61bfc1c00018b58172edb3d3928124e7f61e0e7d296716b96ecbb0fb2f3b95fee28dbba2057f76c70a4cf4e5ca323c7f01598adef78f37337506049f2a668575d5f368803fa77dc8a20caf945cb7fc7b42fa20f964ebc67505e3f32f84712019722c9f2d8e55e8bd5a9849083019f9784b588c97c0b17043e217b4d48564dfa5a4842b06369db286d81768f43c1c5fdeec8079c82d238a45121f4d26d27e41422c2f65aa622b464b2191cabedc550dd1eae7107a8795589d3940c14d2cac91a631dc5e609fbde4d1ddc22a62d8e05a5b146e087a4093bc0c838709c825f7b60a8bd621418847db1633c14e52158644b87f56856f57a737e28c0ea0f6435b8317e3e40ce3a23f74e444bbc3ea6b879c0bc05562231d90a647c91fb74ceed0ccafed62a2358b4f328f2676b7d6d8732f27156b3c2b33561e2f34b2b79c24cadfa192f7fabb592226c50b91b142539f8697f65ef18a00f6ab4a42fc21cfef80aab8268c917d0bdb548c5fcab248dc011d341ed8dc8f9e4268fe53e60ad1b8b6e1db09c4ef963c0c88709d2ede8971f6bbabdac364f13f68f00a4f471bf01824bc898101d864b75dc563eeb20ea72d10b71e9cea09a81a4e7f3ec44fdf184947092d1cbfe4318d4d03a8b7b1fc5ba9b87e87e7330659b8efdea0c28ddb6cfea6841c9fe6dbae82312904a80719fa890afb53a12f7d8537d7a961bb0fc1fec1cc26f055dd2ca1d55df83c82b29efc2f20ad233109183817d3c0c0b6cc1b8c3010b5d6215301dfd5d2a780fbfefd358a96238ab46f31c8468620a742bd6ab6ddcd2e99b5a715c5952eed8840e181ca59616546f7366e7304e5ff692d535aaccd826bf847b81ef192c6881e2806dfc451047890c634f9cad41294c34810c5e174620fffb1da7ae1ff1519734b58dbc503f525e33484a3e72573300d7691be08e0076c81af43539498ad219216eedb68b814900
f73b20b3d408dd2db47e1e183184d2a393b3c17880df2fbd9d70d480f3232bf44759957a4f30d90e5c07c9671b0895e230c02393cf77b66a6b82b7217794e7570ad19af08d20c764b23f53dfc68da5d277306c9efb07e22cb0385be9e53dfae11d76657a4473deb217d440fcba8b0c6df0f9f69b7f5fca5ae82d13ef47c8a6bca4429dd427381df3b0805b7233d4090bf50d5a0ca0021ac5468910f704d1285adf04b067d6ccd117996f1d7d515dc89492b041a10b289da3414f13dc624699e2c4da9eb3c82391e413def43be951da45603786a750a16a1bb3a0ff79e54a399156afe05be380d344e25f2e4f09fd3a6c8eb2f4ffd094c484baf37e411a8769f52d090dd648b33e7748e38becaf952ccc68c152f0718a50a32960a2acd942581491207bba5da76e14fcfd1df00c6141dc919652f83a18b7e3e14aef608687c3538e6610368a5e785b5fdd42b9a4c25d80682857e6666a760650d32ac5b0d2fe8fa017cbed42e2fbce70ad29df7e55cfcbac1bba7d4f14ae6a2e3699fb9841738ae1205f92297c4ad682c6e4e3daaf0a6fba12fd86f839c8687322200402e389431dece6ad8a1a98367643a8fd14c7412db7b1cecebcb6509943c35fdf6f5b2ffba52433c31e4834a8100308d6dd6da4fbf8f4f89b11f29ba1a402b24de54c89868b53530b10b2abdf46894b6c56b834d5ecc80b39aed2595852fb7bd4e61f51a9dff599254e308a5659f09d4b1a4146ee71aff92602b13521f7b5dfd0fcf61f3930b75e00329ad6852436bebaf3f942533139ce9bacae3fa2c98ab96d2f5ebf7425dbd4c5506e188f4dfd2a7e61fd4fdd48c3a05ac15c7899c94d94ce3aedc355bc88dc2cee43972b846fec5b4a8376914262904bcc71a0983f452e1c03aa4a1625419609ae29230a71f79eb5032d23a44b05b42e09e0e7b65939e85b78c207144155028ba7bf9d7ba8c701e691a8b501738a8ae1b56c47486ed12c8e3a0ea85fa41d2c13429b68676d84bf196b7cc820f59bbfb322e8a21dd623f34f9f842a483e96c91fd32fe54c22ff74e521ee577f93e660f30e2a78f2685781503e7e1213d99d2fe2067feaf2c86f78cc31aeb4e4592196a44decd3cca66ccc0091b3240ac90f63c149939548ffecf1c921d73b7f70b65faea19db6892523afa4052c8b98021194b21b80f22102f8d9c26a6aca4d2719daa55914f3d382cb3e4405caf27ce75599e90d00d3c17c553a446366ba2b160f794db63fe6c18b271bc59ea29707b2be1cedf33b869d955c353b7e4586dbac773ad121650c05546158243cc681b20b5c3b3e8c823c47939cf28c1ed5436f3c983b68f51ec6ee4ed28db2b98f3756193f77cdc4b03bb7f697b1abe409da547b5f9b7c27f005969d0780585bde07aa40542ee9306dbe25e4bb06cdffa9f4e45b3b1cc1599c98596e420477c8ab32756b7a94ea0ea660ee351155951780accac4a86beb9856182eb5284fe82861aa59088de4b4ec8295e3d0ae43aa6d36af1cffc1ab74b3e534394ab139d616ab5dc7d3c3c26f614d7dfc127190065c390277600e4b428574d8441d7eccc0e1bc13aa43c03f29d5e36b4dc31f1659854bb8642ce07b9c9a4321cb775c25363a83a0eae3750face88cc50bf749e10ebfe1f6c9408059c99b32de50274ec965993ebaabb6813d8a403af459027fdfb9218ba4c976abb6fd9a051b0595286a2d7ae12afff818c52aac8d494a5d3a2ed7352f207ab8a98b443e65487afd25b4b5552f20ff96116849a54bfa778e3bbc83857f8353aff48238ca1823185fcc167e8f07be353a5dc7c381f71741206f29951f7eada6fff65c495755efcec0629f8fa4c0df50e7f1bbae8701be86b27542ba64f1819ff5c841079c8fa06b07c3fee32f987eedb2c9e17ea399325739bf975bc56e2af1ad33b7d09ada3fe55493c2cf856f4cdc1590f152c17067a1b4964afe6e96a16dccd9658c15a9b011ba901fa80da95681748fa2c5302f8b21b76826470e760311a14223b14a9f16eb2fa7eee63b4a7af8dad53375498d54e049011134cf648ce1c59127969e330d762441c3e58414088c1e1094c4fd6ec6985bcf6cce93340d410e72667c6a934ed4717800cd7acc37d238d5967e80c502ded7326032fadb0acd77d0e38aaeb1504fd9e660f94a06318eafeb330fba065fb7b452583554d276face76c6bce147e56514ad22e22457d4c6e577c321f9920de666710d84997e155aaf836de39e8c985c8e32cd9d996b9bc1f323b271755bb584260e90fe8c247dd929cf8a3a1b3cf2a0993d366007f7e3966c411ec577fac875f7a929da4f32391184817b8aa9da9e1cc9037e725f18efaafbb08d520faa8c80c0fd3a8ddebcc655431f0456ab6569d6d3f13f3cf36c3ed6d464b185ffc79b285dc0ae4a1f3dc2db072618b9ea21d2725e15c1f3f1e30fcfa204bafd7a0980d0963b9a49303d79356c9aac2e071f393df48106a751341ff0b61e1e4f092f9e461e5387bfae709578539
19e32b15e152a2da097fa70b486c14fb45a23c5a20b53ceb9a08285efd0a73a65e8cd14d50cf5e01e7fd3177ed26f1394a0734a2a9242a04285571245d18d6f70bb0287420d0a0b1a2662b3b97dab6c18cc132f990426974bb6531c1dbe4ab6755c7c89987314513d8d1f054b5ab737f0920c32327d1310c2d256dfb49ee38dc98b751b67c988d79638cd9a2f822102402f028b676051920bbee264e0b7e4723cb82ae1af169443bfe16a265daa791e249da8efddbca2956b4c04aa1e5eeb0a8c354b5f3fadefbc1f8f33b96c19d76bf879d92951c38608a9142f899250dba80791d0a62a1b3b60b5e3a8f387bfedc40f60dc5e9341f0a51bc8151e5445c132dad069ee1f1bcded0c3ac2092969"}) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(r1, 0x81f8943c, &(0x7f0000056100)) (async) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(r1, 0x81f8943c, &(0x7f0000056100)={0x0, ""/256, 0x0}) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(0xffffffffffffffff, 0x81f8943c, &(0x7f0000000100)) (async) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(0xffffffffffffffff, 0x81f8943c, &(0x7f0000000100)={0x0, ""/256, 0x0, 0x0}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(0xffffffffffffffff, 0xd000943d, &(0x7f00000745c0)={0x0, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r19}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r20}], 0x0, "7464fbe08eb369"}) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(0xffffffffffffffff, 0x81f8943c, &(0x7f0000000100)={0x0, ""/256, 0x0, 0x0}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(0xffffffffffffffff, 0xd000943d, &(0x7f00000745c0)={0x0, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r21}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r22}], 0x0, "7464fbe08eb369"}) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(0xffffffffffffffff, 0x81f8943c, &(0x7f0000000100)={0x0, ""/256, 0x0, 0x0}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(0xffffffffffffffff, 0xd000943d, &(0x7f00000745c0)={0x0, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r23}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r24}], 0x0, "7464fbe08eb369"}) (async) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(0xffffffffffffffff, 0xd000943d, &(0x7f00000745c0)={0x0, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r23}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r24}], 0x0, "7464fbe08eb369"}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(r1, 0xd000943d, &(0x7f0000056300)={0xa2f8, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, 
{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r3}, {r4, r7}, {0x0, r8}, {0x0, r11}, {r12, r14}, {r15}, {r16}, {r17}, {r18}, {r19}, {r21}, {r23}], 0x5, "0467da414c43ce"}) 10:42:19 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xfffff000}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:42:19 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r1 = socket$inet_smc(0x2b, 0x1, 0x0) r2 = accept$packet(r0, &(0x7f0000000080)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @remote}, &(0x7f00000000c0)=0x14) sendfile(r1, r2, &(0x7f0000000000)=0xe83d, 0x1000001) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 10:42:19 executing program 2: r0 = accept(0xffffffffffffffff, &(0x7f0000000080)=@caif=@util, &(0x7f0000000000)=0x80) getsockopt$inet_sctp6_SCTP_STREAM_SCHEDULER_VALUE(0xffffffffffffffff, 0x84, 0x7c, &(0x7f0000000100)={0x0, 0xc3, 0x7}, &(0x7f0000000140)=0x8) setsockopt$inet_sctp6_SCTP_RESET_STREAMS(r0, 0x84, 0x77, &(0x7f0000000180)={r1, 0x1, 0x4, [0x2, 0xff, 0x3f, 0x281]}, 0x10) r2 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000040)='cpu.stat\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r2, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 10:42:19 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) (async) r0 = openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) (async) r1 = accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) (async) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) (async) r2 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) (async, rerun: 32) sendmsg$nl_route(r2, 0x0, 0x0) (rerun: 32) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(0xffffffffffffffff, 0xd000943d, &(0x7f00000745c0)={0x0, [], 0x0, "7464fbe08eb369"}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(r1, 0xd000943d, 
&(0x7f0000050080)={0x8000000000000000, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, 0x0}], 0x80, "7f3bad9a41af7b"}) (async) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(0xffffffffffffffff, 0x81f8943c, &(0x7f0000000100)={0x0, ""/256, 0x0, 0x0}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(0xffffffffffffffff, 0xd000943d, &(0x7f00000745c0)={0x0, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r4}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r5}], 0x0, "7464fbe08eb369"}) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(0xffffffffffffffff, 0x81f8943c, &(0x7f0000000100)={0x0, ""/256, 0x0, 0x0}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(0xffffffffffffffff, 0xd000943d, &(0x7f00000745c0)={0x0, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r6}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r7}], 0x0, "7464fbe08eb369"}) (async, rerun: 32) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(r2, 0xd000943d, &(0x7f0000051080)={0x1, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, 0x0}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0}], 0x72, "50d84211be02a4"}) (async, rerun: 32) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(r1, 0xd000943d, &(0x7f0000052080)={0xb454, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, 
{}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, 0x0}], 0x5, "023e71e1659deb"}) (async, rerun: 32) ioctl$BTRFS_IOC_TREE_SEARCH_V2(r1, 0xc0709411, &(0x7f0000053080)={{0x0, 0x8, 0x1, 0xffffffff, 0x5, 0xfff, 0x200, 0x3, 0x0, 0x1, 0x40, 0x2, 0xfffffffffffeffff, 0x7, 0x1f}, 0x8, [0x0]}) (rerun: 32) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(0xffffffffffffffff, 0x81f8943c, &(0x7f0000000100)={0x0, ""/256, 0x0, 0x0}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(0xffffffffffffffff, 0xd000943d, &(0x7f00000745c0)={0x0, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r13}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r14}], 0x0, "7464fbe08eb369"}) (async) ioctl$BTRFS_IOC_INO_LOOKUP_USER(r2, 0xd000943e, &(0x7f0000053100)={0x0, 0x0, "8709b3548f1eb64ce95d6d0ba0ef38fbc672aca8612dbb22178a9df59e1c1141098189a7109d63f84e3937aabbdfe0b1b52cd1cb8ba62dddb252dde54e268e36b92f0c724694196bba9637007ab95224bff3bd527550df659dd891459469acee8b049f7c64093d34a54283306d0a24ed523e191b54e82ae5b902bc56d6b2ac74e74d5c5eec3e71b0af05e31c851cb9b43a0bd9bd8062aff918bfb944ef10ad6cbece6f2422bba558a88a2ba9f8274d2dd6516d89e77b1db643bcaa439cb0a9e592f21f5d66b80f88b94b5eb2493eaa53557718fc237c999f26334ebff2621e25cf259f9e23947a5700c265b9ca8f819400857b1be3ea29256922772dff33b106", "9ba14b323cf103a12a354cf3f6036eb4085410964932d1b45912363452387fd0c7a67466e79ee3598432b5430f04482b1144861bac4deab1bf66fce3de878e76b4c5538c2b81996c4f6a06ad77d311d1350e1be9c1bb07d9d8f89d386ff16d1dc7e4189ed3180f8c9748978431465f7dc3f088f4f2cac2095d3d4a0fe28b544aeec612e4a6b42a59518b6551972c868a5b277a2c9fc840d6b2d15c5bf4943fce8c7d8e49f98a2b8a360922989cf375fcdfb70923693e34516dac5c5bebc4958812307fe457ef1bcdbff1db08d0237d1690c3fd9e73796a1524e06d402145ca6510219ad950a4fe1a7f33e777bda03198360c9b0e4dd5f40d6b4330e8c84e7bf10fcd8513e3031be6935666b73abd933527cbf8c776cea2d62b836a3a44f9ba6d27e6003e958559059dec4f8870f941390885aa5ece3f675ca2a2f8314687fe5ffcf7a3a9a84f2a04fec29403740c649247de911aac67effc0f488377358187656154c467d21e281279c73199fee29e167ef4a03ab65db6052ab35ad88eb2d0068e605480432b026cc53c1dfcc50e5c20d695f4ef97f89110a8781a9b0ac738f9d0ef41e5e7350041506c24801be8519143dc5df4e7d21b711f9fb61123a093f261e8f43ec8abc265be78d14aae2adeb1dcfa5a659d59ac71a3729e4ff01e80db03ce35ad4e785c879a419a227b2c1f88e673d094f2105e430a90e154368c1055baf1b2f4b7d7019e0593915dbe19bb04d88aeb3c569fae5e8343004c492dddf09bdfdb8081c22c541f692dbe90bb11b790a3809f64c9d318f7ff0a75579699df5c9dff025e90a5212732248c32cc021943e620c912cbc7570432739b935dd3ff339311ce0caf091a315b3b7a7aa3358fba72eb0e9aa3d38225b0fc83d17eb266bdf35a88d999b73a38dff769ddd6dde24b147c00ce2dddc1d2c120fc84538510e529221f713065e17aae26916f1c986e08ecd902a0ef0ed839eea49372890518765c7a310df75eb3b9b624e758821e57e3e9dde7098ab51fa20988b3d50274cc77efdf0119c8b3fdfd08c4357c77a8db037efbd4768f4f0f58f69c8eb6bafaab0675bcca1e1d9b954c2c287d48ac89ef0dc3151b32eb296f4a61ef7d2154a1ce35586f422b726d6e505e4bd16fc7436db4987e4efaebd751358daa4e035b2122d82b7cd14463e3534cdf77c816892ed0ecf8ab4dd8529e1065571aecdbadbd12e6c8f904cc2eb25aae6be9ae582792fbcb2c5a889caf85349d9c967f7335105c76bab18193f3a9706f75386c7d13b7baeb685129c740f659dd30113ef0db36d9d9efbd254ff931dc8467c59508e8e5cdef7744ce87612d7429fe265b12f781fac3cc54ac21c90288a06b87a375109f840c1697308fc3fc8866138fb2992889acb8376a20120bf7
7b5cc0064dc231224b616bdf76f9abd85b7617a4cefcb47079cb4563e30a76d0a07b7c77b776074c3e876369a49c3a8ebb35023e4c1ddb9eead70202ddc7f551e8e71cac9da7ff057ec990bd0d9bd8e410e0efae3beff0a3473ce946ccc657737292cfaf267b22e40795a9bf6fe1a3a6ede42d34ad5426dfe9c19eec6224daf1a3a3f7b5d90697b2969cd20550a47b8c359daeb733db36182edaa7addb4ac903a59ecb5a273a85eca1fff65a164a670ca17079eb8f0217f31458ea6aafba6c621d8c02bd430af3f479bffab8d0f01650360ef80259f3dbe819600045507d907b5ed06578a1fe48873c6caedaee50f54c27389cef592128775c77679f70ca46fe34685d7c6c89770bd4c03437384df0cdca3c7dbc8aa148420874838b50a75f8c530a9ca814eb95237865a44704d3184e0a8e2010e9407b1fc39b0194769bfbed487159ea96cf3b3381d4b5e90b687d15f3f26fd081a01ccc7d6da891d7d54fd5e119a4242ab297cca7c538bebc5b1667cc682f5d65e076cdea7c5273de8c7c38f21a0caa7c59dd15ecadd6b91f4e4e493b059274d62ea765e7ef0b1531c311e9dd94e97f27fb1fa4ccc759f5a8c7221a393249195e835ba8c4cc7cc89f792dbb5880e742011930e7df0c03384302587dc775a6ef98f1c805db42b9fe85fea271fea1f8a65b8086d7b446ba89e20003c518fa67e5ecd60fa4fdf71d2f6ee1f966bb9d71086d6cd9e362c4c89066b1c0a0db65799c6d48bf50e5c37e5987317fcc51310112c4738715327fb85212b1d4d4b65130f7bb0b6c8fe645b95069cdd5a1ee7515c386fae0ff2cda3232233a9883545b010b83bf58e7ebb83ed5fc165ff871b4f8bd92b3c2672de94c1f8b82aeaa4bf8bd7321df231097ec281cc8322cb6e348dfe7d46c2db5554ee5f610191b386df55958c758f8c8edefafb481149f363058b1301cd633262e78ac19b009a721c9cc43256e179e67e1b60d26bc003a94ad08e170958067633e6d8cecaa421f1b9f85b5959627f819a97df9665c1df3fb21bf2a80f52fa35ea1ad480aa2008bd80ead582c829198b4b15bf395d4ab04a864e3558e2071c2b0a4f4513379d5ed9ad63588000b845fed3b4d2f784ad313de2cde22bda6603c3810e07c4e499e82b5db49345a06a05ca07532c509cfa22c33a41ccad1f50392a8fb951fa8a05f1afdda8b646fe4ae713d779479a3fc6a38ccecb2d86e94faca2da81ff07723ede32935ef70835edaf6cbdcb1759dc6ea0f02b5e8a1d15eb6039c763c342defe18136440359c928508cf05056e35c920b0763a9c6dfef28038b799ca2d9ba1dff299328a02833d039a2b193d70983ee00df893a6ef16dd706e4452c1975ca82d26706fddc6647500b4e52df90e16b78277a1136ab3032ef743de5c7803718b5ab662f00b43301ae9d958b4dadc279cdb4e9696cf72d7f27a236647b0b519c5cf6c63ddbadfdb3117c1a877c21bdf0aca790461bbd778a193c6c3ef93f85737aa251db53d3cebbae6f65187bcadf014dec0f8156d4f090db8c7711e630bca709d368452e0b01b6a704a8909f19c58d899ac5d59a4b94d1dbbe92d4024024e054bd0b1432654e45b6534bb0f1267977a9a3b9355b370235f7e5f0284b9503874f9bbec654e6df5dddcb46eae1308d9dfac58270ac5ac6e0c40850b9ca838907962a8e5bbfabd263af81eb7732da78d5d07df79b47215dbbe793a0d09adff694069daa2b81f8e1124cfb177b3155363dfcae894a4fddc33ab37dc0a3a1c587f185935fe8078f04f88ae73c2ad47634b92d55e29787b1e450d8e2c3c303fac6738374d5b335b01c9b96c60a171b89310e9c978b7345bd1d923479df58f4090d6733ccc22fccd0520eb1125615bcf3a6e2906e9d729a7e53c3eb51ce07c6c3d48101d9c41d2eaf0173227340296ddac690520d7ccc29839a22286ab54d85f14d3f6377349b0b234c9f563aabcd2132b86eb4689c37f25522f5a6f28c5c906d75c3152b59957e909208d55486665b109fd32a7ccf62e21eb7d4617074427f00d82efc15c25dfc10c9ab8f3645c74d43268671cef77b0e9819665e7aae949622529cc3b4a72c8bb8a4bc96dd5f7d810f7dbd7e169f601323ca40c950978eb86582261f7ee5cd4ea8a6ec8a406e11fa842119fc8cf708afe77e2cfb1661d8ccad3052b857e80a38afe5b5897a93579b0d7eb57363fd1021271df4a27a0c36309053b4e1039d8850ab046e9e82cbbfe9d575868eaedef9dbfc024182eef71d95f40c5b8df79d9e26ce7f026181dd1cb5883da136eb4231155b2e291658d8bc35be772286184a577a2637ab9e42f1326e42e2fae6f4be4f5983ad65dbc279a50ca6a533110543ef0bc010d0640b0b07266f6ec2af24adaf696f206d944d656d9617b7a9a692c1fd6ee4d45fd90901135b448e62c8c1eb5da890e2a56a210b480871237b447ce329e05cded0baaa2aa0ba109afb3f7ce903638ded9ca28606aa128597cc593ddcf346f
380a80b0f3a79be45037deba2ef912815896bf4dbf93894838bb8448be8b7b4814a2dade78d94b3458301cfaff3302c966acc482a4b2d3791a185531e8f7f0903396af0b9f63d8c67ce50626a4eec6514e6b8ec5e256c8470c7fa1aed96e588dff58d29bf19cc93caad39b06f8f24500b1f58e42545e0167bfcc0150c9e78e131f7c5ef206ff34566cc13ba118c901598b96b257d9ae63b7ca915c6a9238a20ac915f7731d83699b28936a1af77dd6e5e84fc223213d1cbfaf1709b7442db5f6ae342e0d577d2485ca70ab6235fea810806fdca8e568605a884f2dafc5df681f42f38df16ee2c7ac0e0d7e3e0b4d67e035184c34706ba0d30d3af2798126893f695c3f028ec61bab258c24828a0bf12fec8e2187f51b97a97730093aeffeba2f1c018a23aa2477cbb09fd873b06f64fcee8a8a85c7b24b606e147b677dc7ee8d15200aaa8a7cd3db44f209bf37ea2a3d34302ba8847afa30f8dedcf48379d950c0071f85b7020f1a68ac38711f646cc060cf0a48a98e67ac41c6591997ac8d435c0a8d3777ce0ee95317024131cc09fff269a40fb57a084991aad175799dc3c190657eef13d84bbb00238c4a4a2ff8722ee144e6f4e6d2aa50e162e3e4d9eacee60e67d004ec85a089b275bdea557378d93f99ab992ac91bf7f2e22017bd911b9b4a53fbbc1c5c9e7a26ae6984bb7792f5a0afb59bbfad31ff4f1dd26d0bf968fd4b58fce4e2c297cf282d4d4487bd8b3632a1155968379725bff540a250ea3e75ea1eb1993acfe08f93bc27e0ca77b84962a39ad82aab9a0364d7a6a65e7532d9ea352d2fc3634510120c3a07a400bd56df587b052ba52d480a85d77e7c596247b4f11f40106f4a78c798fbed95fb21e77f44cf29304a37c5ece4e6fc8aac71734bad8311e4b2bfec6ec1584c9a65e3e0109d21ea5b3718f1a848fb39226c3d86916533639e30752e97d176644fff022d1f3b6145ec1f483ab1d255b892adf7caf17d3673be238d8debe86be639daacb46450bc2e1654456c9b133fa5687892f52831a09add8135dd23940ae929071cb24d922daa1fc4272fd69140a236c640b8ed23cd39054ad5d5596a155043e1e1b8bf7e8a39f19261aec8ef716b0a7e6bbf544008828dedf217c1222b3f5a717aae4283042a0995582fbd43820748e9cabbd764027dafc6b9978192e2e40e3eaa4b161ea598c0a66080029b9d5080d36a1985d1e74120c6cab96c9d0fd36f9c35e0694cf62ae7b9e68a71f288faa7c517337192fef3910c0ad7ae50a55661879b0bfdd165356d5510c457c63f203b177b13fabb17aebe267c0b0b6770e5636f907db249e9be6ddf29540b8c54f03d90bf093ca4e26344f677f46322f1ba15631eb6d53a26b4eb5a6543e7058018637d26a7419bc88a2cffb75c7edc450baf586b0c9927e71a931712e0bea91da8acea9782213cb605e5dad45abc20db609aa8e521fdd418b40bbfa"}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(r0, 0xd000943d, &(0x7f0000054100)={0x2, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r9}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r10}], 0x7f, "8eafcf6cc17ecc"}) ioctl$BTRFS_IOC_INO_LOOKUP_USER(r2, 0xd000943e, &(0x7f0000055100)={0x0, 0x0, "5e16ccde4c1a08bdba84ee91790e49783f0a7478a54e551ac65f9b3d31f9ffc7af2e0e6033f94e15d2d853c261dd082ec6442c5f52b582a8ffc046d3339fa1e265b27ab800c6665b61206a63130f7c6c460f31c652a6397d83af950061237d8d6efea2ca170646955b70f7640f43bbbe6e47574dd28d0b5e733a2bde20df898dfaf3dcacae855b372c53d3b1925424c99557f1d1783c8615f1aacab2dd5c98e375fe57cd9dc7ad19f8c2df4e304e2f7aa98c594d0c5543df5657302cdd19f5792e0997467768d12ce18351bf510906fb7354d8f11e8acdc5aae07834afdc35f92f47c6c7ee6ec6e2266da6b800fdf6a0752abc094c76e97b8525801d2975a9e6", 
"a613d3a620061e41ecc6d22192027f0600f13acdcf0b61975da21f83d773dac58f251e09e7c7b8c3b703965a04e8c69277ef9b630dd7992b7434a1db50587e49085694fe751e78f942ebcaa72460ccb92db41b333faca28cb7ab7eff64f33a427a4c3a704958343769fd01e86c34fa145747c9c6c64f9625b52b3bccdb905fa4da0bfcdcfb2f6c0cbe00234c4cea132f63bc3729cb21510268e865d8d3da84f71b0eab1415931d3da93c0643a01e63fa7031d97a42244b9560b1f926ab12348e4b903e93bcd293714ba11fac07e858272018e3a7bf9d6de0f682287e41a604935ebb1f33886e6ed3e6977de8650d40c1cf774da3a2d50ac01100f9cc584c2c8c693fb7283e25db7c1684682cab8ef2a61876c691b3c32ec3415dffda1c7c91eefd5c665b0dc89a5e9710477550328b842fecb3ced95488369019c987d4e2237a79f0f810f7ac1bde623e56d93a06f6945ef709d8810fdc5fd049e1b5053b2caa0547bcd8eec49a3305c5f5f63f6f68b2e8c9a15437a8f4726862fcc9989a425a475196de989ea259869233699320bcbff7908da4dcbc763f3f5da206b1fe65cb2f2a23938aec8bb4167865d11ee30c4b4ac1472898c4291e9fccb994b29ec12d1176776fd6b31cb0a4a36db894f2698160d89c9c07f7fd71912a2aaca6fac504d6a5731ad413701f734ba7f2139898d50c9b413524333821b0432f605e63640e5f9724679b64b558f0602f7475e5a57998be83977f95560859307ae15bb526391d8048f2733715ed144a63fa4d7154820c882a2c06ed41cf0d744640a498f952739db395f446fa40378d106afcff9f2cc6f0d973951d2c3807d7e83c79a4bd9da7319d29dea051c926a26b74b784412d9c40148ce44f5bf8d62549cc6a1a34f8d806d2891cef6cab75ffbd01b15e2dba49e2b4a3b632ca178e286e84c21169c5158b8e02c355b5b82a0a04c6667eb7927f4b5e623f2d7942609477c9c55925869785329181ccfc921ca0b425c8acb21874db3c3ebf6dd9d2dcdf6f4948f497448c5db597cc7303bb72b5767796ba2a28c00dce3f3b2515d808d9247acc9890b3e4b8af57c84271e15db787d07fc15dd43f3912678be6f9150aea86da86a973f00ad7523f691307a2a979dd81cab3dc47520028088fe1180c703cc8a4a15205b33e2cbbb39a74c027668b5dac4de55ac8c9c040556bfe070c41305a69b6298b086c7e181799709943bd6f0494cb6c4ad6146dc351ed0c639226257c87f049723f8a039b21557eaef3091d9a9afef59f63e48cd5acdda426fdc53f8791e4d5d8a7aa41f3ac4fd2102c56e402a88ab9a6c8bd1f641012a0b585020cbf0a019328bdda3c234c27da617f08558ab7392e0c0d4f39d96a3a763e65f5186a4dc00b06fa690bbad5dd62c760640f49036904b38ed51a02915342b3421915bc2876fb855a53e4a1592bc6da2b506eab435dd8e4db752be0481cf16110ae164b396e40ef6f4bb6a8e81ba8979eb877a21bcdf02bd4c9a6745a36678499cdd425847c8fb8b2edc61bfc1c00018b58172edb3d3928124e7f61e0e7d296716b96ecbb0fb2f3b95fee28dbba2057f76c70a4cf4e5ca323c7f01598adef78f37337506049f2a668575d5f368803fa77dc8a20caf945cb7fc7b42fa20f964ebc67505e3f32f84712019722c9f2d8e55e8bd5a9849083019f9784b588c97c0b17043e217b4d48564dfa5a4842b06369db286d81768f43c1c5fdeec8079c82d238a45121f4d26d27e41422c2f65aa622b464b2191cabedc550dd1eae7107a8795589d3940c14d2cac91a631dc5e609fbde4d1ddc22a62d8e05a5b146e087a4093bc0c838709c825f7b60a8bd621418847db1633c14e52158644b87f56856f57a737e28c0ea0f6435b8317e3e40ce3a23f74e444bbc3ea6b879c0bc05562231d90a647c91fb74ceed0ccafed62a2358b4f328f2676b7d6d8732f27156b3c2b33561e2f34b2b79c24cadfa192f7fabb592226c50b91b142539f8697f65ef18a00f6ab4a42fc21cfef80aab8268c917d0bdb548c5fcab248dc011d341ed8dc8f9e4268fe53e60ad1b8b6e1db09c4ef963c0c88709d2ede8971f6bbabdac364f13f68f00a4f471bf01824bc898101d864b75dc563eeb20ea72d10b71e9cea09a81a4e7f3ec44fdf184947092d1cbfe4318d4d03a8b7b1fc5ba9b87e87e7330659b8efdea0c28ddb6cfea6841c9fe6dbae82312904a80719fa890afb53a12f7d8537d7a961bb0fc1fec1cc26f055dd2ca1d55df83c82b29efc2f20ad233109183817d3c0c0b6cc1b8c3010b5d6215301dfd5d2a780fbfefd358a96238ab46f31c8468620a742bd6ab6ddcd2e99b5a715c5952eed8840e181ca59616546f7366e7304e5ff692d535aaccd826bf847b81ef192c6881e2806dfc451047890c634f9cad41294c34810c5e174620fffb1da7ae1ff1519734b58dbc503f525e33484a3e72573300d7691be08e0076c81af43539498ad219216eedb68b814900
f73b20b3d408dd2db47e1e183184d2a393b3c17880df2fbd9d70d480f3232bf44759957a4f30d90e5c07c9671b0895e230c02393cf77b66a6b82b7217794e7570ad19af08d20c764b23f53dfc68da5d277306c9efb07e22cb0385be9e53dfae11d76657a4473deb217d440fcba8b0c6df0f9f69b7f5fca5ae82d13ef47c8a6bca4429dd427381df3b0805b7233d4090bf50d5a0ca0021ac5468910f704d1285adf04b067d6ccd117996f1d7d515dc89492b041a10b289da3414f13dc624699e2c4da9eb3c82391e413def43be951da45603786a750a16a1bb3a0ff79e54a399156afe05be380d344e25f2e4f09fd3a6c8eb2f4ffd094c484baf37e411a8769f52d090dd648b33e7748e38becaf952ccc68c152f0718a50a32960a2acd942581491207bba5da76e14fcfd1df00c6141dc919652f83a18b7e3e14aef608687c3538e6610368a5e785b5fdd42b9a4c25d80682857e6666a760650d32ac5b0d2fe8fa017cbed42e2fbce70ad29df7e55cfcbac1bba7d4f14ae6a2e3699fb9841738ae1205f92297c4ad682c6e4e3daaf0a6fba12fd86f839c8687322200402e389431dece6ad8a1a98367643a8fd14c7412db7b1cecebcb6509943c35fdf6f5b2ffba52433c31e4834a8100308d6dd6da4fbf8f4f89b11f29ba1a402b24de54c89868b53530b10b2abdf46894b6c56b834d5ecc80b39aed2595852fb7bd4e61f51a9dff599254e308a5659f09d4b1a4146ee71aff92602b13521f7b5dfd0fcf61f3930b75e00329ad6852436bebaf3f942533139ce9bacae3fa2c98ab96d2f5ebf7425dbd4c5506e188f4dfd2a7e61fd4fdd48c3a05ac15c7899c94d94ce3aedc355bc88dc2cee43972b846fec5b4a8376914262904bcc71a0983f452e1c03aa4a1625419609ae29230a71f79eb5032d23a44b05b42e09e0e7b65939e85b78c207144155028ba7bf9d7ba8c701e691a8b501738a8ae1b56c47486ed12c8e3a0ea85fa41d2c13429b68676d84bf196b7cc820f59bbfb322e8a21dd623f34f9f842a483e96c91fd32fe54c22ff74e521ee577f93e660f30e2a78f2685781503e7e1213d99d2fe2067feaf2c86f78cc31aeb4e4592196a44decd3cca66ccc0091b3240ac90f63c149939548ffecf1c921d73b7f70b65faea19db6892523afa4052c8b98021194b21b80f22102f8d9c26a6aca4d2719daa55914f3d382cb3e4405caf27ce75599e90d00d3c17c553a446366ba2b160f794db63fe6c18b271bc59ea29707b2be1cedf33b869d955c353b7e4586dbac773ad121650c05546158243cc681b20b5c3b3e8c823c47939cf28c1ed5436f3c983b68f51ec6ee4ed28db2b98f3756193f77cdc4b03bb7f697b1abe409da547b5f9b7c27f005969d0780585bde07aa40542ee9306dbe25e4bb06cdffa9f4e45b3b1cc1599c98596e420477c8ab32756b7a94ea0ea660ee351155951780accac4a86beb9856182eb5284fe82861aa59088de4b4ec8295e3d0ae43aa6d36af1cffc1ab74b3e534394ab139d616ab5dc7d3c3c26f614d7dfc127190065c390277600e4b428574d8441d7eccc0e1bc13aa43c03f29d5e36b4dc31f1659854bb8642ce07b9c9a4321cb775c25363a83a0eae3750face88cc50bf749e10ebfe1f6c9408059c99b32de50274ec965993ebaabb6813d8a403af459027fdfb9218ba4c976abb6fd9a051b0595286a2d7ae12afff818c52aac8d494a5d3a2ed7352f207ab8a98b443e65487afd25b4b5552f20ff96116849a54bfa778e3bbc83857f8353aff48238ca1823185fcc167e8f07be353a5dc7c381f71741206f29951f7eada6fff65c495755efcec0629f8fa4c0df50e7f1bbae8701be86b27542ba64f1819ff5c841079c8fa06b07c3fee32f987eedb2c9e17ea399325739bf975bc56e2af1ad33b7d09ada3fe55493c2cf856f4cdc1590f152c17067a1b4964afe6e96a16dccd9658c15a9b011ba901fa80da95681748fa2c5302f8b21b76826470e760311a14223b14a9f16eb2fa7eee63b4a7af8dad53375498d54e049011134cf648ce1c59127969e330d762441c3e58414088c1e1094c4fd6ec6985bcf6cce93340d410e72667c6a934ed4717800cd7acc37d238d5967e80c502ded7326032fadb0acd77d0e38aaeb1504fd9e660f94a06318eafeb330fba065fb7b452583554d276face76c6bce147e56514ad22e22457d4c6e577c321f9920de666710d84997e155aaf836de39e8c985c8e32cd9d996b9bc1f323b271755bb584260e90fe8c247dd929cf8a3a1b3cf2a0993d366007f7e3966c411ec577fac875f7a929da4f32391184817b8aa9da9e1cc9037e725f18efaafbb08d520faa8c80c0fd3a8ddebcc655431f0456ab6569d6d3f13f3cf36c3ed6d464b185ffc79b285dc0ae4a1f3dc2db072618b9ea21d2725e15c1f3f1e30fcfa204bafd7a0980d0963b9a49303d79356c9aac2e071f393df48106a751341ff0b61e1e4f092f9e461e5387bfae709578539
19e32b15e152a2da097fa70b486c14fb45a23c5a20b53ceb9a08285efd0a73a65e8cd14d50cf5e01e7fd3177ed26f1394a0734a2a9242a04285571245d18d6f70bb0287420d0a0b1a2662b3b97dab6c18cc132f990426974bb6531c1dbe4ab6755c7c89987314513d8d1f054b5ab737f0920c32327d1310c2d256dfb49ee38dc98b751b67c988d79638cd9a2f822102402f028b676051920bbee264e0b7e4723cb82ae1af169443bfe16a265daa791e249da8efddbca2956b4c04aa1e5eeb0a8c354b5f3fadefbc1f8f33b96c19d76bf879d92951c38608a9142f899250dba80791d0a62a1b3b60b5e3a8f387bfedc40f60dc5e9341f0a51bc8151e5445c132dad069ee1f1bcded0c3ac2092969"}) (async) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(r1, 0x81f8943c, &(0x7f0000056100)={0x0, ""/256, 0x0}) (async) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(0xffffffffffffffff, 0x81f8943c, &(0x7f0000000100)={0x0, ""/256, 0x0, 0x0}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(0xffffffffffffffff, 0xd000943d, &(0x7f00000745c0)={0x0, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r19}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r20}], 0x0, "7464fbe08eb369"}) (async) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(0xffffffffffffffff, 0x81f8943c, &(0x7f0000000100)={0x0, ""/256, 0x0, 0x0}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(0xffffffffffffffff, 0xd000943d, &(0x7f00000745c0)={0x0, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r21}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r22}], 0x0, "7464fbe08eb369"}) (async, rerun: 32) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(0xffffffffffffffff, 0x81f8943c, &(0x7f0000000100)={0x0, ""/256, 0x0, 0x0}) (rerun: 32) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(0xffffffffffffffff, 0xd000943d, &(0x7f00000745c0)={0x0, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r23}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r24}], 0x0, "7464fbe08eb369"}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(r1, 0xd000943d, &(0x7f0000056300)={0xa2f8, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, 
{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r3}, {r4, r7}, {0x0, r8}, {0x0, r11}, {r12, r14}, {r15}, {r16}, {r17}, {r18}, {r19}, {r21}, {r23}], 0x5, "0467da414c43ce"}) [ 2203.873009][ T8667] netlink: 'syz-executor.3': attribute type 1 has an invalid length. 10:42:20 executing program 2: r0 = accept(0xffffffffffffffff, &(0x7f0000000080)=@caif=@util, &(0x7f0000000000)=0x80) getsockopt$inet_sctp6_SCTP_STREAM_SCHEDULER_VALUE(0xffffffffffffffff, 0x84, 0x7c, &(0x7f0000000100)={0x0, 0xc3, 0x7}, &(0x7f0000000140)=0x8) setsockopt$inet_sctp6_SCTP_RESET_STREAMS(r0, 0x84, 0x77, &(0x7f0000000180)={r1, 0x1, 0x4, [0x2, 0xff, 0x3f, 0x281]}, 0x10) r2 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000040)='cpu.stat\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r2, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2203.916544][ T8697] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2204.029001][ T8667] 8021q: adding VLAN 0 to HW filter on device bond1459 [ 2204.160823][ T8669] bond1459: (slave bridge1354): making interface the new active one [ 2204.187792][ T8669] bond1459: (slave bridge1354): Enslaving as an active interface with an up link 10:42:20 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0x8}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:42:20 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r1 = socket$inet_smc(0x2b, 0x1, 0x0) r2 = accept$packet(r0, &(0x7f0000000080)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @remote}, &(0x7f00000000c0)=0x14) sendfile(r1, r2, &(0x7f0000000000)=0xe83d, 0x1000001) (async) sendfile(r1, r2, &(0x7f0000000000)=0xe83d, 0x1000001) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 10:42:20 executing program 2: r0 = accept(0xffffffffffffffff, &(0x7f0000000080)=@caif=@util, &(0x7f0000000000)=0x80) getsockopt$inet_sctp6_SCTP_STREAM_SCHEDULER_VALUE(0xffffffffffffffff, 0x84, 0x7c, &(0x7f0000000100)={0x0, 0xc3, 0x7}, &(0x7f0000000140)=0x8) setsockopt$inet_sctp6_SCTP_RESET_STREAMS(r0, 0x84, 0x77, &(0x7f0000000180)={r1, 0x1, 0x4, [0x2, 0xff, 0x3f, 0x281]}, 0x10) (async) r2 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000040)='cpu.stat\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r2, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 10:42:20 executing program 0: setsockopt$IP_VS_SO_SET_DEL(0xffffffffffffffff, 0x0, 0x484, &(0x7f0000000000)={0x88, @dev={0xac, 0x14, 0x14, 0x1a}, 0x4e23, 0x4, 'dh\x00', 0x2, 0x7fffffff, 0x20}, 0x2c) mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) openat$cgroup_ro(0xffffffffffffffff, 0x0, 
0x275a, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r0 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r0, 0x0, 0x0) [ 2204.216425][ T8692] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2204.256630][ T8692] 8021q: adding VLAN 0 to HW filter on device bond859 10:42:20 executing program 5: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) writev(r0, &(0x7f0000000340)=[{&(0x7f0000000280)="043caa07f3a3f76d858ace87d769ded7550278376e8c4f6550d9108cbf5189deea33e10b8a7224c6107d53d01f904f58c8e83e60c4192c5a7fbd1a5f9c9fb63ca8dddee8d195355273926702d073752d80d8545e667b5c485a", 0x59}, {&(0x7f0000000100)="e48c29435b087869336f3e74363df6ea882508c8aa487ff564e800ccd724f50e3896c5d551b1a2ec6bb29187299fe2e06c11dfcb2075f9a839b34a4a6b4c2bec", 0x40}, {&(0x7f0000000400)="a318ece45becd6e47ff8375b816640bce6a9ff4d8e83ab791404d351c3ba957f2f3a0220176c17e899248cf03c8a908c4ade8de15d81e79b803df97f6fe5e817c2efde3befe034a7bc6d52a0c1caf88dc7d2077e40879876ce4aa8d6273615939a97526b8cb409d0c58f8075db16b3aed80bd44efb660be17d9db07a801a43b59a034e9998a5d60b072ab0fe5dd97a86e25568ab6d9d74c7af31171f38eaff0cb0fc24300b48212ec6608ef820fc964a28db4cd8d0526d550efac04bc60f4e4cd3c224cbe0ba45f28e0566930afbca59424e39854793c6e301fdc9cedeb00500db4af5f6c2004568aa4682fc656b1943bb63", 0xf2}, {&(0x7f0000000500)="a1a8c7128cf53b860b5aaa1ccee4ffe846420e0192cfed6fed4eeddf537179b0a83627c3868f3b78b60a61263a90957f69337b2ab0a123218ed0318185135c095b602df299adfc458eef4947ab34782ecdbae8436486efca9d9076f2316af18200a3014efba92706cc1748f7c5925bdaa3aa4516b6f1a6f22f4f1c23d2554620b90ad856298d3087e4519d78d14dd2d1dbb2401cfd330b19ac2fc26b2b4837023a585b846cdc0e007773719a6ad0cf3acb3cde81220e1909741522d75d5c49222f1d0ef6d8bd1b58070df04e658553cb734adbdb98c8482330ed25a1d53b5b3a6c09aa436715f3535c84a943", 0xec}], 0x4) accept(0xffffffffffffffff, 0x0, &(0x7f0000000080)) socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x0, 0x803, 0x0) syz_genetlink_get_family_id$mptcp(&(0x7f0000000000), r1) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r3, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r5, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0xffffff9e}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x3c}}, 0x0) [ 2204.318381][ T8688] bond859: (slave bridge1014): making interface the new active one [ 2204.332088][ T8688] bond859: (slave bridge1014): Enslaving as an active interface with an up link 10:42:20 executing program 2: r0 = 
openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r1 = bpf$ITER_CREATE(0x21, &(0x7f0000000000)={r0}, 0x8) ioctl$sock_qrtr_TIOCOUTQ(r1, 0x5411, &(0x7f0000000080)) r2 = syz_init_net_socket$nl_rdma(0x10, 0x3, 0x10) getsockopt$SO_TIMESTAMPING(r2, 0x1, 0x41, &(0x7f0000000140), &(0x7f0000000180)=0x4) r3 = socket$inet6_tcp(0xa, 0x1, 0x0) ioctl$F2FS_IOC_RESIZE_FS(r3, 0x4008f510, &(0x7f0000000100)=0x9) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f00000000c0)=0xfffffffffffffbff) 10:42:20 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r1 = socket$inet_smc(0x2b, 0x1, 0x0) r2 = accept$packet(r0, &(0x7f0000000080)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @remote}, &(0x7f00000000c0)=0x14) sendfile(r1, r2, &(0x7f0000000000)=0xe83d, 0x1000001) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2204.539804][ T8701] 8021q: adding VLAN 0 to HW filter on device bond1422 10:42:20 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xffffff7f}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:42:20 executing program 0: setsockopt$IP_VS_SO_SET_DEL(0xffffffffffffffff, 0x0, 0x484, &(0x7f0000000000)={0x88, @dev={0xac, 0x14, 0x14, 0x1a}, 0x4e23, 0x4, 'dh\x00', 0x2, 0x7fffffff, 0x20}, 0x2c) (async) mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) (async) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) (async) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) (async) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r0 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) (async) sendmsg$nl_route(r0, 0x0, 0x0) [ 2204.721835][ T8703] bond1422: (slave bridge1284): making interface the new active one [ 2204.744933][ T8703] bond1422: (slave bridge1284): Enslaving as an active interface with an up link 10:42:20 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r1 = accept(r0, 0x0, &(0x7f0000000000)) setsockopt$XDP_TX_RING(r1, 0x11b, 0x3, &(0x7f0000000080)=0x1000, 0x4) 10:42:20 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r1 = bpf$ITER_CREATE(0x21, &(0x7f0000000000)={r0}, 0x8) ioctl$sock_qrtr_TIOCOUTQ(r1, 0x5411, &(0x7f0000000080)) (async) r2 = syz_init_net_socket$nl_rdma(0x10, 0x3, 0x10) getsockopt$SO_TIMESTAMPING(r2, 0x1, 0x41, &(0x7f0000000140), &(0x7f0000000180)=0x4) (async) r3 = socket$inet6_tcp(0xa, 0x1, 0x0) ioctl$F2FS_IOC_RESIZE_FS(r3, 0x4008f510, 
&(0x7f0000000100)=0x9) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f00000000c0)=0xfffffffffffffbff) 10:42:20 executing program 0: setsockopt$IP_VS_SO_SET_DEL(0xffffffffffffffff, 0x0, 0x484, &(0x7f0000000000)={0x88, @dev={0xac, 0x14, 0x14, 0x1a}, 0x4e23, 0x4, 'dh\x00', 0x2, 0x7fffffff, 0x20}, 0x2c) (async) mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) (async) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) (async) accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) (async) r0 = socket$netlink(0x10, 0x3, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r0, 0x0, 0x0) [ 2204.974631][ T8726] 8021q: adding VLAN 0 to HW filter on device bond860 [ 2205.215713][ T8729] bond860: (slave bridge1015): making interface the new active one [ 2205.233303][ T8729] bond860: (slave bridge1015): Enslaving as an active interface with an up link [ 2205.251428][ T8730] validate_nla: 2 callbacks suppressed [ 2205.251448][ T8730] netlink: 'syz-executor.3': attribute type 1 has an invalid length. [ 2205.329108][ T8730] 8021q: adding VLAN 0 to HW filter on device bond1460 10:42:21 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0x9}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:42:21 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r1 = accept(r0, 0x0, &(0x7f0000000000)) setsockopt$XDP_TX_RING(r1, 0x11b, 0x3, &(0x7f0000000080)=0x1000, 0x4) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) accept(r0, 0x0, &(0x7f0000000000)) (async) setsockopt$XDP_TX_RING(r1, 0x11b, 0x3, &(0x7f0000000080)=0x1000, 0x4) (async) 10:42:21 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r1 = bpf$ITER_CREATE(0x21, &(0x7f0000000000)={r0}, 0x8) ioctl$sock_qrtr_TIOCOUTQ(r1, 0x5411, &(0x7f0000000080)) r2 = syz_init_net_socket$nl_rdma(0x10, 0x3, 0x10) getsockopt$SO_TIMESTAMPING(r2, 0x1, 0x41, &(0x7f0000000140), &(0x7f0000000180)=0x4) socket$inet6_tcp(0xa, 0x1, 0x0) (async) r3 = socket$inet6_tcp(0xa, 0x1, 0x0) ioctl$F2FS_IOC_RESIZE_FS(r3, 0x4008f510, &(0x7f0000000100)=0x9) (async) ioctl$F2FS_IOC_RESIZE_FS(r3, 0x4008f510, &(0x7f0000000100)=0x9) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f00000000c0)=0xfffffffffffffbff) 10:42:21 executing program 5: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 
0x0) writev(r0, &(0x7f0000000340)=[{&(0x7f0000000280)="043caa07f3a3f76d858ace87d769ded7550278376e8c4f6550d9108cbf5189deea33e10b8a7224c6107d53d01f904f58c8e83e60c4192c5a7fbd1a5f9c9fb63ca8dddee8d195355273926702d073752d80d8545e667b5c485a", 0x59}, {&(0x7f0000000100)="e48c29435b087869336f3e74363df6ea882508c8aa487ff564e800ccd724f50e3896c5d551b1a2ec6bb29187299fe2e06c11dfcb2075f9a839b34a4a6b4c2bec", 0x40}, {&(0x7f0000000400)="a318ece45becd6e47ff8375b816640bce6a9ff4d8e83ab791404d351c3ba957f2f3a0220176c17e899248cf03c8a908c4ade8de15d81e79b803df97f6fe5e817c2efde3befe034a7bc6d52a0c1caf88dc7d2077e40879876ce4aa8d6273615939a97526b8cb409d0c58f8075db16b3aed80bd44efb660be17d9db07a801a43b59a034e9998a5d60b072ab0fe5dd97a86e25568ab6d9d74c7af31171f38eaff0cb0fc24300b48212ec6608ef820fc964a28db4cd8d0526d550efac04bc60f4e4cd3c224cbe0ba45f28e0566930afbca59424e39854793c6e301fdc9cedeb00500db4af5f6c2004568aa4682fc656b1943bb63", 0xf2}, {&(0x7f0000000500)="a1a8c7128cf53b860b5aaa1ccee4ffe846420e0192cfed6fed4eeddf537179b0a83627c3868f3b78b60a61263a90957f69337b2ab0a123218ed0318185135c095b602df299adfc458eef4947ab34782ecdbae8436486efca9d9076f2316af18200a3014efba92706cc1748f7c5925bdaa3aa4516b6f1a6f22f4f1c23d2554620b90ad856298d3087e4519d78d14dd2d1dbb2401cfd330b19ac2fc26b2b4837023a585b846cdc0e007773719a6ad0cf3acb3cde81220e1909741522d75d5c49222f1d0ef6d8bd1b58070df04e658553cb734adbdb98c8482330ed25a1d53b5b3a6c09aa436715f3535c84a943", 0xec}], 0x4) accept(0xffffffffffffffff, 0x0, &(0x7f0000000080)) socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x0, 0x803, 0x0) syz_genetlink_get_family_id$mptcp(&(0x7f0000000000), r1) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r3, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r5, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0xffffffe4}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x3c}}, 0x0) 10:42:21 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) r1 = accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) sendmsg$nl_route(r1, &(0x7f00000000c0)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x200}, 0xc, &(0x7f0000000080)={&(0x7f0000000040)=@ipv4_getrule={0x1c, 0x22, 0x200, 0x70bd28, 0x25dfdbfc, {0x2, 0x0, 0x10, 0x1f, 0x9, 0x0, 0x0, 0x2, 0x1}, [""]}, 0x1c}, 0x1, 0x0, 0x0, 0x4000800}, 0x400) r2 = syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r3 = socket$netlink(0x10, 0x3, 0x0) sendmsg$SOCK_DIAG_BY_FAMILY(r1, &(0x7f00000005c0)={&(0x7f0000000540)={0x10, 0x0, 0x0, 0x80}, 0xc, &(0x7f0000000580)={&(0x7f00000017c0)={0x13f4, 0x14, 0x0, 0x70bd2c, 0x25dfdbfe, {0x22, 0x3f}, [@INET_DIAG_REQ_BYTECODE={0x1004, 0x1, 
"1e6ec9f7b6bc3ec9d617afe6cd0594d5b0f4326dcfd76b66b67d0b31cd9d220f73cc4acbcd9275a9429048fb746b00e22bd5aeef5d39702a327cbe87f000d6cfe52036eba14d72af982451c36f607484eb6cf26e49ba40c80d970c3f78ddb079c787471a71de61383e0bb215d9ce3201bafdb9833bbea97664eff9c1bccbdfb44eac15187ed515b64f36852feb2e52229aca82801383b04ae690f2a2f58d058058f3795f5765c3dfd93654241a4bcf5debc64cae2253dccce6b8bb90264aeed8f7a19c7afa0269e90ca9b14b1657124182b702c9e40e1f6c0298b48c59c3c7b8096ca658756f58f8b9a55d424446cc667ff5941877ddb70b875088297c4fb5ca6932e02ec84618b9c2a449a0f57ffd1847054570680d926074f09dcc8e0d8287b858e09066bcf10f320f3199fd77c565c9a9582c72e1e7cf92838ba999ad6275b3eff6b7f1b5a8992838d673ac6cd05015b95fbcbadd72cc23a508fd31a15b83ad3e5024a4c8a4e3e43d2aaed8511d4d49021e87038711a86e6032a8f3af5a4d131e46b6f5bea72e2a3b1046c461519614faff417be64e5f4b7ebb2eda466fd53d82ca353073da5c7241642c3d1d9bc7e76c8bcca498244e83b8e00675644673f09294dd0b054679ab88fc90c37c6814d3eb2c8df50c3ad6e637ed37910c3ce1701d548735b4f36b75c7966de2ab31f25f897eba8e63236b48bb9eef211cfe620609a75b2cfe507b29091358a20372670674928e0a5207461b0348e05f277bf31448a8c8adf022156df4b193994a654128137e3256cdb2011fd0189dec79cb95af212bae17a8e02d9b84f54dc0c0a986018adb6c670882b69e45251af32ae3eebfdfb01679f8a748e3d28cd503c6760c77dfac20fc0f2f5a60f06ff84cfb60911993ceec1979315439b65ba613f4918523c10da4f066e9daaae9c301469b3037b74be6a18fed95e7849d412b8f4fcb5b412ff013f0713be7f37e81dd09ee97624f2f87fcaa05d0625a35ceb9d4a7650171a915d09cb57caa68a54ab95ba330b7c0e803ab620ac28a0d6c66d965e32fd6abf39e670be5ffdafb676a8bfbe72b89e5d8e9d5205243fca29ddfb83bf179ea2952b8c9af09ca3633e59a39a290db6a08a3ee8c90733b13f0cd4f5e387269feab17118dd6cd5d25ad22810b5660026fadcf6bf4a2afc56ca67c135de3b8df4a8a433aa38cceb3677d23ae729c768b12b8e2b9a3b27fc2455be7e51a99f0422bf4055018230b591b39819f1be3864d2e64586f12dced1f616a91896d6f11612a3a72c040cd0049c3d7c460f554766a45741a79ff5998ff1212815d6f6e17eb46f01608a07f01b9b88c8c9b73ccebd3789a915494dc955420251d2d5a2e273be0656eb68a83bcae3fc38cdec2682ff5791fb7600b8e43e3a7e679eb0e1aad3945b2c02a7e91b16a0e08c99a411fd5b96d597528cedbe541711b5f93a8a997af13f671a58d6d44a0c6b582a4541d82656a7cbe30c4cff58276bf30755db60ba4af553530e399cb4a224ade04e06b7453d91bff9f4742c938d2ed439063b7885361f92b89fa58c51a0e27446c3579e21be654319f66a1dc49dd71907b50760fdd92874a70c3f6b9c5de48a3c7444115e457739ccdfe094620c610eb15386548c77542e38ac5e08eee3e32832f76e63ba6104c6621d7e8ae2ee448cd1121b6fdfc6bd05086e007db4f9d57389b509a5106aa25a02d8199264a9c8a19619f3943f74a5746b250a40bf0b410951679558fb85573345ad004b510e7f61f33f2b6f9b5193a2d5b5171781e8e90aa7a32a94be5f1919862b96b2798eeac4af42a91ea30fa9c4989193f17cd34ef9d3d9d9640faff437d6d50933ea7fafc833b78fc8284d614928ecc794ba0ea1c09e9d100e7c2659759e193ee5b55588eb0b76ac49b4bfecdb967aa2c41a3343a58ce8b4be312393ae200aa4c9bc1bcac07fda826f3cc44390cfb4e6f6c137dc20bca1ee05279e63575d7003731cdb5ab980200b7aa13906f852aa3cb676bcee992f76c9968b87444add82c75a6fb95bb45e1132ec68530548609ada69c16ac07f63fae974210b54d89f43960ca20aa843c523541dd7dfaaf76b9508df242831e2a34442586725ca3c7588e3ec36c5a469a02868bb6b9f3c7337428d14e78e8ac56158769203f2a33b42cb4e9708a3110ed1ae8499591d5089938e1bb25df257b47bdc1156463cb8bf1de65ae9337f3521cf76cc5d2205f6df908f10c427d6b86db45a4a8f1fba48d759d1d105df286630818047e3a2d7425c2d77344596a5d2129f2d63f7d08a474f587428f6eb2bf94cd007723faf608aae0706eeeec61763f505c1b2a8b5d490269d1ca44f2e3d2ad386881298501cf1b665970cc81531793b0786d750a842f900ecc6c6c033959117af5aef4edcab1a0cea830f290b423da287b5af549acda62f3ecab8af079873c8346b6bd75d49700bd4ceaf226a8694e4c81944a2534f0977d74c396aeb9526e3362998decfcb43f
2d8d391d2d029ede7610e9c131c70559e80b5dd6ff32c0b6bd78cc85658c0f7ab9953490390de169ab3db43a4f3fb676351bd20705c16a3edde6f7040cafbb113d4183709e48488868964dfb7d279393c7ca0da61d2a748bbacdfc9e5d0b36d1e9c60e952fec8330dd3ac24f632cbe0d877c439add4816a8165383c17078a5eefd0fc75a5da3a15cd16b97b71dfa3a86183004ad96e75efa7bc9589dae6272db91364a16e4dcffdc970a6968ecea9928fdf948cc21dae2c84b1a3d08c00951e8f7b8f9ad103873b1e006409107260494a178f4cbb44c602ad8d0a24ec28ee6e75d425c189e6d3c9207a22708375c0a0dd8be831cc0dc5301504d383e5db6a476c120402a027ca2e4659ffc0146614162185a02df18c2bee4bf893faf47621b8b6296b1faecab3ab7f56c100e951bd9986ef2f5feb1e3921ebce17f91b749ef2c3086a9c44988e09472220c7d776852826acc8047ddf98fe28bee2104b570efc608ef9b5b1f06d6cba141361cdd873eeafe9eaf7f1f60fc714ac68457e0393558456937687a02c35e66d768e8253b0396cd63be5b9150f6be533d3069d87c67338e093d462a026d39401f71499953f0e779ae47a8096b4008f59b88a49561105b00703ea1377ffb0bba225abb10657e47703a48685fcfcc41b258293b8810d1e5040f67e12d2b8cb086f3fa2dac0c9150b828864efb701b75fb88cbac0907787067498a0570d770047abec243c38c46b2432e8d44635b8d1cda1494e9cbf802100a494c92933b9de660197f885c21c50f780d20a27374e8befdab2985e22d1c97e459e51ca71a32df9f502ca5c6a8e06ca1f31c577d9cf714f70cb0f87a2ca88010a3e9ad4a7bd705f274b053cbbacad7d266c69a8de0bc93436dc69ca8ec01f780332ee98c00caa8afff00d39797b06a7417703d2ae541a1455ddc7597e04d41d851d35da3d9d95e55a138ae916b102ffd9bb05b709f8e27063cbece54d025d9ad874376948d9c1e42b7eaecd1546e98d8430b1cea4d27125e3ee4ca33b5537b573894a60b012c9dad49104635c296e91509bcfec88c25077ade91a9901ee72286033f6a2bfb4db27f64f1b897a22f756aaf15acdf529fbb1eb8c7824c15753d6b3cdb9d7cd70d6d221ba69b10d08ca389ff1500dff46675b60268411742082351d426be0fb006ef6024b461e2ec3af4556875a5f8fb1c832cd88f895b6a1fa28de11d1fd320815db4f40df65abc8fc1da1382026f0860978e3223f7b6e7462c6d8360b346c897b2ff537b5cb8828470ea7532dfcc1f1f86892e90b81d87c3e1f11ddcbac60b7ab4c351db54dee963b70d5898b15633bc644d77a5ee1f8032191f99f6cf61476a2f693d599b5693bb23775d0663d791fee7e4c60fcb2606acb40110ac851fadac8eeac7260906374c21de159326f601c07fdbdeb3e8c29df624f2524fe70136c56526eb282bfc800561d1cdab49d66b03424d08098719a8fe2fe5a9514c2bdb5d59dd7874d8e7ca5de678b3a43b6d4819bfc54b562eabe1b591b3f1e0422b6ba5ed68b130144da892c14cd722222f2a2601782cd1bd685a5da13de06de57c68ff9741ee1c5216a11c62d01a25817b603c5b7148ded9ef5ccc990a27a6163e0ceec0b10dba7f187cf6be3a8e25d4272e86945e29d95e89d41969c9d092d9fafffebfd7b5d35660fef465d40e745957bd1f71c073495580876a745af705e32d2e38afd8feca71a8e957047fe53384af1edbad9c30da0288578b3b57a6469363a239ade6e59aa762d67020cc724dd66a604ff1ebc73dbdea53792472b2723700d0601521148c4453d3ccf4c0a2aec0868f5ecca2e5ba7dfb2159b3e8154440d6728946f938f5dadb8740d773f1c53b9a53edbb97c143a78e3b46b41e0d59b03c99e6c608d5928cf141cacb9a70f0a9cf51731fccf95ac03235cb18c48b424e1d7cdda5666ae4696098cfc751538c4d371c241d4f097169688cdf879eacb8a15888ce5f7717e4eea97c0056991f28850aa482a5c2194fd2c2219f4893604d35842d1ee7b9128bbb4ac4c96fd23f758ad516822b97663f2ec180a18146f22d4779d81bd366ce6aa4110f484487ddf975eb8dc5c7e1fcf58158ef15c588df99794d46d0da00188c92840dc875b1898513b38228c3dc779db82f26b7613da82c13e2e5141242c61b1ba74d0b877c0b63c96abc34ae8cc602abc486ecec8db6721007f1bec70afb801010b645d111ef781cd1d5596e0d09031be0883c42e72076e4e946e1d883becb4adfda5d25c9b13350b1fd19c1a4bbbd542ae766bd1eea57f1df94b7990af425574038969de670d519b921be2aae70f1fe115fc8b571a2a0b82894750196c3e15933c573583b43e725ea714794abce4690d05f0eb3477a6254f1af92ea56f22d9c7b54e3eb20b6827d60aa2f97148bcff415a86606503f24f2f3d5aec41ab3b956d0dde2719c6c73a9a8b6348d1c44a75901dea9536c2df33882da4e76d49c7c0efd4fae5054a
eb3c0a8d3db2f04da7616679d76924543638012b1410079afcefeb7238b8c70b20183d10156d5868c1b9ca311c1657e82ee90f8a26c0dac5737aa951c8b6cd2c03b212ee29e5bf7e4fd09689bf29c775783dcbe0b834053a8b065f151a808b435f7e1443837cc9fc07bb59e58fecfd73ce9342012123ad531001128f7dc2660ccabde803e61597fc572207999eb7fb22bb1ab8b4368b862a34acc8652363ba636de3b0b16500dc3d96b6f957a79916595f424bed03850693d5b38d29a2aa923372331a564ce5ee4b1b260d2466da7776204a0fa3d509601f8f4308ce570b85adc8d5c0e17f2fdcd8b1c21e3af65811d540dcc317616aba4013350d60134437294b3fedd047f1090236e3e309d9741b1a2e438eec6bb95419095c1db300d48d1bbcd389795ddd966b9de8f7e58a5d27e9df97b5cecd5bda3f0302dacec56494378f7d91ebd694d09a1d98f9aaae2144b6b3a8868ffbd9f73847782163011a326fd6b365fe7ce5fb7f161df8ba9b3e6b0e148d9c6594b7993badc2202a9e07bdb3b0553f055c09f00d7ddc59815e369ddd2dcb3d03f056b5500d583d7bf6bfc6f2a478381e5b176c266be2b54c2941e5c1d1ac781eceed3cb8e244a49f29f434398e5a621d891dc0c27928520395848947b2903c65cfed9530909f29869e454184d7cbdc2b868ff06f3f7068f549ae15bfb94b5f0841989c1aee550e65607a1c5057af73d790ac17aeb9103afe4b57cbdd5e8a4ad6fefd603bdc0ea921389"}, @INET_DIAG_REQ_BYTECODE={0xe7, 0x1, "1e5ecf201184aa6941375d88f6adc10d4559f78d1f0fae6d60ca413ef246a2319179f1014a4a320b553fc27636b079b36c343eea85b0d6df48fb36c2ff2ce35c432ec279ea9ade208dc357e6897d7ee8a1e7470f85ef180317362398b1651ada1d2fd106ae567f34c40f5efec9cbfdf65d626df6388272e12381b62a19f40a966e9afd5053093d1fca5ce254f207068e0425051e72cbb07cfdca495353fe5f00b9b98d934743c2a348c2777f2ccc06b83312660d0c39ba7cfc5fb8143f5aaa4127b57d9ec62a838c83e8967fc9f807012a71cba250c07e58c690411739369fcc4192dc"}, @INET_DIAG_REQ_BYTECODE={0x64, 0x1, "e86c36b92938c775ca74ffd523649ed98dd9b4e1068d097ccdd387b61e9331ffbb011afbd4198233b45fc81541108fd9fa92391552bbd800422783aa41f35ed2e2e8964cbe7819483c1ef2359b21344099b2572a348d7d337dc56259c4e07476"}, @INET_DIAG_REQ_BYTECODE={0x1e, 0x1, "a918d86f530eab750f2dd69512b4c21413c67a4d00ee146e71a9"}, @INET_DIAG_REQ_BYTECODE={0xc5, 0x1, "06afdad2c236dc026c8a0e52d7f5a3eabb60ab6c0fcfc71c1dc437cd2165a15c09540e0e81c8d5206e306d82dfbf2966ed5d63dcf68052575a30e47b2a9c2cc5b94b334c48706a466c83c49db38480bd5b79d2133dead1d1d060663d8bf0f32aea47930b7d74490242f78058c1c74c7aecb2a13331617b1440ece4ec5e3201b88cba145e6b8777676d6ba32b051b5f4a115952d69c915a449df6222154dde5cdea075621dd91a9b6fc1553d34e96b996d2eb33ba52c01bd1652aba558dcecb4340"}, @INET_DIAG_REQ_BYTECODE={0xff, 0x1, "28496674a7ed1043fcf22409b60a84384b05e9bc56883fcee2fd6c098082234356f4fd2e80e0f8eb1b3908e7f980d7b915b7468a5f8bc075cf5a6dbd086d109c81ce3bea28a608355f5280a1dc1d0d69e858ac4c8588886f5756f09e95f2bfa6a76418fafacc4e48fe9f371788f81ba6f5ad3288540c5d3e1207d4a47362e598566f90537d2d8fb607e3e81c39fa9ff09e0b7be2c2677e82761bbee6c94bbf5db3f5511690567ffb8a998444e45e87b268cc9fdad44bc8cddb7e12b8bfa6fd12f8120b3538405d9101714548a8a5af4a1ab030e32358d221187e23bf60e3d54cd740fcaf3661af8827f5a085f5e70814353ae5abd39d479ebac144"}, @INET_DIAG_REQ_BYTECODE={0x77, 0x1, "97f56bd28ec36e9784d2cfed30f3e3c68831d3aa6017490b72bd98098c2ee8d62593c271388fcdd85fcb409037a1b67456affe487cb4116d4fed68af0ec2e849efb1d8c475de7c4ed09d9440b2ae04fb54b124817465804ea78f9e9311b82e9d28f15fa3afbd16ed8ad3b501eeb69f1a1deec5"}, @INET_DIAG_REQ_BYTECODE={0x1a, 0x1, "78a76a96a491b62384c79a08ce7151d88222a5617653"}, @INET_DIAG_REQ_BYTECODE={0x14, 0x1, "5a51bcb7bdbbdf70d7a15c2919cdbab8"}]}, 0x13f4}, 0x1, 0x0, 0x0, 0x800}, 0x8800) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) r4 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r5 = 
openat$cgroup_ro(r4, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r5, 0x40305828, &(0x7f0000001780)={0x2f, 0x1, 0x0, 0x5fffff}) r6 = socket$inet_sctp(0x2, 0x1, 0x84) getsockopt$inet_sctp_SCTP_MAX_BURST(r6, 0x84, 0xd, &(0x7f0000000000)=@assoc_value, &(0x7f00000000c0)=0x8) ioctl$FS_IOC_RESVSP(r6, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r5, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) sendmsg$NL80211_CMD_DEL_PMK(r0, &(0x7f0000000500)={&(0x7f00000002c0)={0x10, 0x0, 0x0, 0x2000}, 0xc, &(0x7f00000004c0)={&(0x7f0000000400)={0x38, r2, 0x10, 0x70bd2d, 0x25dfdbfd, {{}, {@void, @val={0xc, 0x99, {0x3d0f201f, 0x5f}}}}, [@NL80211_ATTR_MAC={0xa}, @NL80211_ATTR_MAC={0xa, 0x6, @broadcast}]}, 0x38}, 0x1, 0x0, 0x0, 0x4}, 0x20000094) write$tun(r5, 0x0, 0x0) r7 = socket$nl_generic(0x10, 0x3, 0x10) r8 = syz_genetlink_get_family_id$mptcp(&(0x7f0000000300), 0xffffffffffffffff) sendmsg$MPTCP_PM_CMD_DEL_ADDR(r7, &(0x7f0000000480)={0x0, 0x0, &(0x7f0000000440)={&(0x7f0000000340)={0x20, r8, 0x1, 0x0, 0x0, {}, [@MPTCP_PM_ATTR_ADDR={0xc, 0x1, 0x0, 0x1, [@MPTCP_PM_ADDR_ATTR_FAMILY={0x6}]}]}, 0x20}}, 0x0) sendmsg$MPTCP_PM_CMD_GET_ADDR(r5, &(0x7f0000000200)={&(0x7f0000000100)={0x10, 0x0, 0x0, 0x4000}, 0xc, &(0x7f00000001c0)={&(0x7f0000000180)={0x2c, r8, 0x1, 0x70bd26, 0x25dfdbfb, {}, [@MPTCP_PM_ATTR_SUBFLOWS={0x8, 0x3, 0x7}, @MPTCP_PM_ATTR_SUBFLOWS={0x8, 0x3, 0x8}, @MPTCP_PM_ATTR_RCV_ADD_ADDRS={0x8, 0x2, 0x5}]}, 0x2c}, 0x1, 0x0, 0x0, 0x44000}, 0x8000) sendmsg$nl_route(r3, 0x0, 0x0) [ 2205.450130][ T8733] bond1460: (slave bridge1355): making interface the new active one [ 2205.479713][ T8733] bond1460: (slave bridge1355): Enslaving as an active interface with an up link [ 2205.605225][ T8769] EXT4-fs warning: 4 callbacks suppressed [ 2205.607651][ T8769] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further 10:42:21 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r1 = accept(r0, 0x0, &(0x7f0000000000)) setsockopt$XDP_TX_RING(r1, 0x11b, 0x3, &(0x7f0000000080)=0x1000, 0x4) [ 2205.650907][ T8758] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
10:42:21 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r1 = socket$alg(0x26, 0x5, 0x0) ioctl$F2FS_IOC_DEFRAGMENT(r1, 0xc010f508, &(0x7f0000000000)={0x3, 0xffffffffffffffff}) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2205.837288][ T8758] 8021q: adding VLAN 0 to HW filter on device bond1423 [ 2205.912136][ T8786] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2206.027605][ T8760] bridge1285: entered promiscuous mode [ 2206.035220][ T8760] bridge1285: entered allmulticast mode 10:42:22 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xffffff9e}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:42:22 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) (async) r0 = openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) r1 = accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) (async) sendmsg$nl_route(r1, &(0x7f00000000c0)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x200}, 0xc, &(0x7f0000000080)={&(0x7f0000000040)=@ipv4_getrule={0x1c, 0x22, 0x200, 0x70bd28, 0x25dfdbfc, {0x2, 0x0, 0x10, 0x1f, 0x9, 0x0, 0x0, 0x2, 0x1}, [""]}, 0x1c}, 0x1, 0x0, 0x0, 0x4000800}, 0x400) (async) r2 = syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r3 = socket$netlink(0x10, 0x3, 0x0) (async) sendmsg$SOCK_DIAG_BY_FAMILY(r1, &(0x7f00000005c0)={&(0x7f0000000540)={0x10, 0x0, 0x0, 0x80}, 0xc, &(0x7f0000000580)={&(0x7f00000017c0)={0x13f4, 0x14, 0x0, 0x70bd2c, 0x25dfdbfe, {0x22, 0x3f}, [@INET_DIAG_REQ_BYTECODE={0x1004, 0x1, 
"1e6ec9f7b6bc3ec9d617afe6cd0594d5b0f4326dcfd76b66b67d0b31cd9d220f73cc4acbcd9275a9429048fb746b00e22bd5aeef5d39702a327cbe87f000d6cfe52036eba14d72af982451c36f607484eb6cf26e49ba40c80d970c3f78ddb079c787471a71de61383e0bb215d9ce3201bafdb9833bbea97664eff9c1bccbdfb44eac15187ed515b64f36852feb2e52229aca82801383b04ae690f2a2f58d058058f3795f5765c3dfd93654241a4bcf5debc64cae2253dccce6b8bb90264aeed8f7a19c7afa0269e90ca9b14b1657124182b702c9e40e1f6c0298b48c59c3c7b8096ca658756f58f8b9a55d424446cc667ff5941877ddb70b875088297c4fb5ca6932e02ec84618b9c2a449a0f57ffd1847054570680d926074f09dcc8e0d8287b858e09066bcf10f320f3199fd77c565c9a9582c72e1e7cf92838ba999ad6275b3eff6b7f1b5a8992838d673ac6cd05015b95fbcbadd72cc23a508fd31a15b83ad3e5024a4c8a4e3e43d2aaed8511d4d49021e87038711a86e6032a8f3af5a4d131e46b6f5bea72e2a3b1046c461519614faff417be64e5f4b7ebb2eda466fd53d82ca353073da5c7241642c3d1d9bc7e76c8bcca498244e83b8e00675644673f09294dd0b054679ab88fc90c37c6814d3eb2c8df50c3ad6e637ed37910c3ce1701d548735b4f36b75c7966de2ab31f25f897eba8e63236b48bb9eef211cfe620609a75b2cfe507b29091358a20372670674928e0a5207461b0348e05f277bf31448a8c8adf022156df4b193994a654128137e3256cdb2011fd0189dec79cb95af212bae17a8e02d9b84f54dc0c0a986018adb6c670882b69e45251af32ae3eebfdfb01679f8a748e3d28cd503c6760c77dfac20fc0f2f5a60f06ff84cfb60911993ceec1979315439b65ba613f4918523c10da4f066e9daaae9c301469b3037b74be6a18fed95e7849d412b8f4fcb5b412ff013f0713be7f37e81dd09ee97624f2f87fcaa05d0625a35ceb9d4a7650171a915d09cb57caa68a54ab95ba330b7c0e803ab620ac28a0d6c66d965e32fd6abf39e670be5ffdafb676a8bfbe72b89e5d8e9d5205243fca29ddfb83bf179ea2952b8c9af09ca3633e59a39a290db6a08a3ee8c90733b13f0cd4f5e387269feab17118dd6cd5d25ad22810b5660026fadcf6bf4a2afc56ca67c135de3b8df4a8a433aa38cceb3677d23ae729c768b12b8e2b9a3b27fc2455be7e51a99f0422bf4055018230b591b39819f1be3864d2e64586f12dced1f616a91896d6f11612a3a72c040cd0049c3d7c460f554766a45741a79ff5998ff1212815d6f6e17eb46f01608a07f01b9b88c8c9b73ccebd3789a915494dc955420251d2d5a2e273be0656eb68a83bcae3fc38cdec2682ff5791fb7600b8e43e3a7e679eb0e1aad3945b2c02a7e91b16a0e08c99a411fd5b96d597528cedbe541711b5f93a8a997af13f671a58d6d44a0c6b582a4541d82656a7cbe30c4cff58276bf30755db60ba4af553530e399cb4a224ade04e06b7453d91bff9f4742c938d2ed439063b7885361f92b89fa58c51a0e27446c3579e21be654319f66a1dc49dd71907b50760fdd92874a70c3f6b9c5de48a3c7444115e457739ccdfe094620c610eb15386548c77542e38ac5e08eee3e32832f76e63ba6104c6621d7e8ae2ee448cd1121b6fdfc6bd05086e007db4f9d57389b509a5106aa25a02d8199264a9c8a19619f3943f74a5746b250a40bf0b410951679558fb85573345ad004b510e7f61f33f2b6f9b5193a2d5b5171781e8e90aa7a32a94be5f1919862b96b2798eeac4af42a91ea30fa9c4989193f17cd34ef9d3d9d9640faff437d6d50933ea7fafc833b78fc8284d614928ecc794ba0ea1c09e9d100e7c2659759e193ee5b55588eb0b76ac49b4bfecdb967aa2c41a3343a58ce8b4be312393ae200aa4c9bc1bcac07fda826f3cc44390cfb4e6f6c137dc20bca1ee05279e63575d7003731cdb5ab980200b7aa13906f852aa3cb676bcee992f76c9968b87444add82c75a6fb95bb45e1132ec68530548609ada69c16ac07f63fae974210b54d89f43960ca20aa843c523541dd7dfaaf76b9508df242831e2a34442586725ca3c7588e3ec36c5a469a02868bb6b9f3c7337428d14e78e8ac56158769203f2a33b42cb4e9708a3110ed1ae8499591d5089938e1bb25df257b47bdc1156463cb8bf1de65ae9337f3521cf76cc5d2205f6df908f10c427d6b86db45a4a8f1fba48d759d1d105df286630818047e3a2d7425c2d77344596a5d2129f2d63f7d08a474f587428f6eb2bf94cd007723faf608aae0706eeeec61763f505c1b2a8b5d490269d1ca44f2e3d2ad386881298501cf1b665970cc81531793b0786d750a842f900ecc6c6c033959117af5aef4edcab1a0cea830f290b423da287b5af549acda62f3ecab8af079873c8346b6bd75d49700bd4ceaf226a8694e4c81944a2534f0977d74c396aeb9526e3362998decfcb43f
2d8d391d2d029ede7610e9c131c70559e80b5dd6ff32c0b6bd78cc85658c0f7ab9953490390de169ab3db43a4f3fb676351bd20705c16a3edde6f7040cafbb113d4183709e48488868964dfb7d279393c7ca0da61d2a748bbacdfc9e5d0b36d1e9c60e952fec8330dd3ac24f632cbe0d877c439add4816a8165383c17078a5eefd0fc75a5da3a15cd16b97b71dfa3a86183004ad96e75efa7bc9589dae6272db91364a16e4dcffdc970a6968ecea9928fdf948cc21dae2c84b1a3d08c00951e8f7b8f9ad103873b1e006409107260494a178f4cbb44c602ad8d0a24ec28ee6e75d425c189e6d3c9207a22708375c0a0dd8be831cc0dc5301504d383e5db6a476c120402a027ca2e4659ffc0146614162185a02df18c2bee4bf893faf47621b8b6296b1faecab3ab7f56c100e951bd9986ef2f5feb1e3921ebce17f91b749ef2c3086a9c44988e09472220c7d776852826acc8047ddf98fe28bee2104b570efc608ef9b5b1f06d6cba141361cdd873eeafe9eaf7f1f60fc714ac68457e0393558456937687a02c35e66d768e8253b0396cd63be5b9150f6be533d3069d87c67338e093d462a026d39401f71499953f0e779ae47a8096b4008f59b88a49561105b00703ea1377ffb0bba225abb10657e47703a48685fcfcc41b258293b8810d1e5040f67e12d2b8cb086f3fa2dac0c9150b828864efb701b75fb88cbac0907787067498a0570d770047abec243c38c46b2432e8d44635b8d1cda1494e9cbf802100a494c92933b9de660197f885c21c50f780d20a27374e8befdab2985e22d1c97e459e51ca71a32df9f502ca5c6a8e06ca1f31c577d9cf714f70cb0f87a2ca88010a3e9ad4a7bd705f274b053cbbacad7d266c69a8de0bc93436dc69ca8ec01f780332ee98c00caa8afff00d39797b06a7417703d2ae541a1455ddc7597e04d41d851d35da3d9d95e55a138ae916b102ffd9bb05b709f8e27063cbece54d025d9ad874376948d9c1e42b7eaecd1546e98d8430b1cea4d27125e3ee4ca33b5537b573894a60b012c9dad49104635c296e91509bcfec88c25077ade91a9901ee72286033f6a2bfb4db27f64f1b897a22f756aaf15acdf529fbb1eb8c7824c15753d6b3cdb9d7cd70d6d221ba69b10d08ca389ff1500dff46675b60268411742082351d426be0fb006ef6024b461e2ec3af4556875a5f8fb1c832cd88f895b6a1fa28de11d1fd320815db4f40df65abc8fc1da1382026f0860978e3223f7b6e7462c6d8360b346c897b2ff537b5cb8828470ea7532dfcc1f1f86892e90b81d87c3e1f11ddcbac60b7ab4c351db54dee963b70d5898b15633bc644d77a5ee1f8032191f99f6cf61476a2f693d599b5693bb23775d0663d791fee7e4c60fcb2606acb40110ac851fadac8eeac7260906374c21de159326f601c07fdbdeb3e8c29df624f2524fe70136c56526eb282bfc800561d1cdab49d66b03424d08098719a8fe2fe5a9514c2bdb5d59dd7874d8e7ca5de678b3a43b6d4819bfc54b562eabe1b591b3f1e0422b6ba5ed68b130144da892c14cd722222f2a2601782cd1bd685a5da13de06de57c68ff9741ee1c5216a11c62d01a25817b603c5b7148ded9ef5ccc990a27a6163e0ceec0b10dba7f187cf6be3a8e25d4272e86945e29d95e89d41969c9d092d9fafffebfd7b5d35660fef465d40e745957bd1f71c073495580876a745af705e32d2e38afd8feca71a8e957047fe53384af1edbad9c30da0288578b3b57a6469363a239ade6e59aa762d67020cc724dd66a604ff1ebc73dbdea53792472b2723700d0601521148c4453d3ccf4c0a2aec0868f5ecca2e5ba7dfb2159b3e8154440d6728946f938f5dadb8740d773f1c53b9a53edbb97c143a78e3b46b41e0d59b03c99e6c608d5928cf141cacb9a70f0a9cf51731fccf95ac03235cb18c48b424e1d7cdda5666ae4696098cfc751538c4d371c241d4f097169688cdf879eacb8a15888ce5f7717e4eea97c0056991f28850aa482a5c2194fd2c2219f4893604d35842d1ee7b9128bbb4ac4c96fd23f758ad516822b97663f2ec180a18146f22d4779d81bd366ce6aa4110f484487ddf975eb8dc5c7e1fcf58158ef15c588df99794d46d0da00188c92840dc875b1898513b38228c3dc779db82f26b7613da82c13e2e5141242c61b1ba74d0b877c0b63c96abc34ae8cc602abc486ecec8db6721007f1bec70afb801010b645d111ef781cd1d5596e0d09031be0883c42e72076e4e946e1d883becb4adfda5d25c9b13350b1fd19c1a4bbbd542ae766bd1eea57f1df94b7990af425574038969de670d519b921be2aae70f1fe115fc8b571a2a0b82894750196c3e15933c573583b43e725ea714794abce4690d05f0eb3477a6254f1af92ea56f22d9c7b54e3eb20b6827d60aa2f97148bcff415a86606503f24f2f3d5aec41ab3b956d0dde2719c6c73a9a8b6348d1c44a75901dea9536c2df33882da4e76d49c7c0efd4fae5054a
eb3c0a8d3db2f04da7616679d76924543638012b1410079afcefeb7238b8c70b20183d10156d5868c1b9ca311c1657e82ee90f8a26c0dac5737aa951c8b6cd2c03b212ee29e5bf7e4fd09689bf29c775783dcbe0b834053a8b065f151a808b435f7e1443837cc9fc07bb59e58fecfd73ce9342012123ad531001128f7dc2660ccabde803e61597fc572207999eb7fb22bb1ab8b4368b862a34acc8652363ba636de3b0b16500dc3d96b6f957a79916595f424bed03850693d5b38d29a2aa923372331a564ce5ee4b1b260d2466da7776204a0fa3d509601f8f4308ce570b85adc8d5c0e17f2fdcd8b1c21e3af65811d540dcc317616aba4013350d60134437294b3fedd047f1090236e3e309d9741b1a2e438eec6bb95419095c1db300d48d1bbcd389795ddd966b9de8f7e58a5d27e9df97b5cecd5bda3f0302dacec56494378f7d91ebd694d09a1d98f9aaae2144b6b3a8868ffbd9f73847782163011a326fd6b365fe7ce5fb7f161df8ba9b3e6b0e148d9c6594b7993badc2202a9e07bdb3b0553f055c09f00d7ddc59815e369ddd2dcb3d03f056b5500d583d7bf6bfc6f2a478381e5b176c266be2b54c2941e5c1d1ac781eceed3cb8e244a49f29f434398e5a621d891dc0c27928520395848947b2903c65cfed9530909f29869e454184d7cbdc2b868ff06f3f7068f549ae15bfb94b5f0841989c1aee550e65607a1c5057af73d790ac17aeb9103afe4b57cbdd5e8a4ad6fefd603bdc0ea921389"}, @INET_DIAG_REQ_BYTECODE={0xe7, 0x1, "1e5ecf201184aa6941375d88f6adc10d4559f78d1f0fae6d60ca413ef246a2319179f1014a4a320b553fc27636b079b36c343eea85b0d6df48fb36c2ff2ce35c432ec279ea9ade208dc357e6897d7ee8a1e7470f85ef180317362398b1651ada1d2fd106ae567f34c40f5efec9cbfdf65d626df6388272e12381b62a19f40a966e9afd5053093d1fca5ce254f207068e0425051e72cbb07cfdca495353fe5f00b9b98d934743c2a348c2777f2ccc06b83312660d0c39ba7cfc5fb8143f5aaa4127b57d9ec62a838c83e8967fc9f807012a71cba250c07e58c690411739369fcc4192dc"}, @INET_DIAG_REQ_BYTECODE={0x64, 0x1, "e86c36b92938c775ca74ffd523649ed98dd9b4e1068d097ccdd387b61e9331ffbb011afbd4198233b45fc81541108fd9fa92391552bbd800422783aa41f35ed2e2e8964cbe7819483c1ef2359b21344099b2572a348d7d337dc56259c4e07476"}, @INET_DIAG_REQ_BYTECODE={0x1e, 0x1, "a918d86f530eab750f2dd69512b4c21413c67a4d00ee146e71a9"}, @INET_DIAG_REQ_BYTECODE={0xc5, 0x1, "06afdad2c236dc026c8a0e52d7f5a3eabb60ab6c0fcfc71c1dc437cd2165a15c09540e0e81c8d5206e306d82dfbf2966ed5d63dcf68052575a30e47b2a9c2cc5b94b334c48706a466c83c49db38480bd5b79d2133dead1d1d060663d8bf0f32aea47930b7d74490242f78058c1c74c7aecb2a13331617b1440ece4ec5e3201b88cba145e6b8777676d6ba32b051b5f4a115952d69c915a449df6222154dde5cdea075621dd91a9b6fc1553d34e96b996d2eb33ba52c01bd1652aba558dcecb4340"}, @INET_DIAG_REQ_BYTECODE={0xff, 0x1, "28496674a7ed1043fcf22409b60a84384b05e9bc56883fcee2fd6c098082234356f4fd2e80e0f8eb1b3908e7f980d7b915b7468a5f8bc075cf5a6dbd086d109c81ce3bea28a608355f5280a1dc1d0d69e858ac4c8588886f5756f09e95f2bfa6a76418fafacc4e48fe9f371788f81ba6f5ad3288540c5d3e1207d4a47362e598566f90537d2d8fb607e3e81c39fa9ff09e0b7be2c2677e82761bbee6c94bbf5db3f5511690567ffb8a998444e45e87b268cc9fdad44bc8cddb7e12b8bfa6fd12f8120b3538405d9101714548a8a5af4a1ab030e32358d221187e23bf60e3d54cd740fcaf3661af8827f5a085f5e70814353ae5abd39d479ebac144"}, @INET_DIAG_REQ_BYTECODE={0x77, 0x1, "97f56bd28ec36e9784d2cfed30f3e3c68831d3aa6017490b72bd98098c2ee8d62593c271388fcdd85fcb409037a1b67456affe487cb4116d4fed68af0ec2e849efb1d8c475de7c4ed09d9440b2ae04fb54b124817465804ea78f9e9311b82e9d28f15fa3afbd16ed8ad3b501eeb69f1a1deec5"}, @INET_DIAG_REQ_BYTECODE={0x1a, 0x1, "78a76a96a491b62384c79a08ce7151d88222a5617653"}, @INET_DIAG_REQ_BYTECODE={0x14, 0x1, "5a51bcb7bdbbdf70d7a15c2919cdbab8"}]}, 0x13f4}, 0x1, 0x0, 0x0, 0x800}, 0x8800) (async) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) (async) r4 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r5 = 
openat$cgroup_ro(r4, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r5, 0x40305828, &(0x7f0000001780)={0x2f, 0x1, 0x0, 0x5fffff}) (async, rerun: 64) r6 = socket$inet_sctp(0x2, 0x1, 0x84) (rerun: 64) getsockopt$inet_sctp_SCTP_MAX_BURST(r6, 0x84, 0xd, &(0x7f0000000000)=@assoc_value, &(0x7f00000000c0)=0x8) (async) ioctl$FS_IOC_RESVSP(r6, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) (async) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r5, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) sendmsg$NL80211_CMD_DEL_PMK(r0, &(0x7f0000000500)={&(0x7f00000002c0)={0x10, 0x0, 0x0, 0x2000}, 0xc, &(0x7f00000004c0)={&(0x7f0000000400)={0x38, r2, 0x10, 0x70bd2d, 0x25dfdbfd, {{}, {@void, @val={0xc, 0x99, {0x3d0f201f, 0x5f}}}}, [@NL80211_ATTR_MAC={0xa}, @NL80211_ATTR_MAC={0xa, 0x6, @broadcast}]}, 0x38}, 0x1, 0x0, 0x0, 0x4}, 0x20000094) (async) write$tun(r5, 0x0, 0x0) (async) r7 = socket$nl_generic(0x10, 0x3, 0x10) r8 = syz_genetlink_get_family_id$mptcp(&(0x7f0000000300), 0xffffffffffffffff) sendmsg$MPTCP_PM_CMD_DEL_ADDR(r7, &(0x7f0000000480)={0x0, 0x0, &(0x7f0000000440)={&(0x7f0000000340)={0x20, r8, 0x1, 0x0, 0x0, {}, [@MPTCP_PM_ATTR_ADDR={0xc, 0x1, 0x0, 0x1, [@MPTCP_PM_ADDR_ATTR_FAMILY={0x6}]}]}, 0x20}}, 0x0) (async) sendmsg$MPTCP_PM_CMD_GET_ADDR(r5, &(0x7f0000000200)={&(0x7f0000000100)={0x10, 0x0, 0x0, 0x4000}, 0xc, &(0x7f00000001c0)={&(0x7f0000000180)={0x2c, r8, 0x1, 0x70bd26, 0x25dfdbfb, {}, [@MPTCP_PM_ATTR_SUBFLOWS={0x8, 0x3, 0x7}, @MPTCP_PM_ATTR_SUBFLOWS={0x8, 0x3, 0x8}, @MPTCP_PM_ATTR_RCV_ADD_ADDRS={0x8, 0x2, 0x5}]}, 0x2c}, 0x1, 0x0, 0x0, 0x44000}, 0x8000) (async) sendmsg$nl_route(r3, 0x0, 0x0) 10:42:22 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) r1 = accept$packet(r0, &(0x7f0000000080)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @random}, &(0x7f00000000c0)=0x14) splice(r1, &(0x7f0000000100)=0x70, 0xffffffffffffffff, &(0x7f0000000180)=0x60, 0x1, 0x0) pipe(&(0x7f0000000140)={0xffffffffffffffff}) close(r2) ioctl$TUNGETVNETLE(r2, 0x800454dd, &(0x7f0000000000)) 10:42:22 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r1 = socket$alg(0x26, 0x5, 0x0) ioctl$F2FS_IOC_DEFRAGMENT(r1, 0xc010f508, &(0x7f0000000000)={0x3, 0xffffffffffffffff}) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2206.171147][ T8774] netlink: 'syz-executor.3': attribute type 1 has an invalid length. 
[ 2206.232838][ T8774] 8021q: adding VLAN 0 to HW filter on device bond1461 [ 2206.266326][ T8789] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2206.332149][ T8798] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further 10:42:22 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) r1 = accept$packet(r0, &(0x7f0000000080)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @random}, &(0x7f00000000c0)=0x14) splice(r1, &(0x7f0000000100)=0x70, 0xffffffffffffffff, &(0x7f0000000180)=0x60, 0x1, 0x0) pipe(&(0x7f0000000140)={0xffffffffffffffff}) close(r2) ioctl$TUNGETVNETLE(r2, 0x800454dd, &(0x7f0000000000)) [ 2206.529665][ T8808] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2206.600212][ T8782] bond1461: (slave bridge1356): making interface the new active one [ 2206.635820][ T8782] bond1461: (slave bridge1356): Enslaving as an active interface with an up link 10:42:22 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0xa}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:42:22 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r1 = socket$alg(0x26, 0x5, 0x0) ioctl$F2FS_IOC_DEFRAGMENT(r1, 0xc010f508, &(0x7f0000000000)={0x3, 0xffffffffffffffff}) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 10:42:22 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) (async) r0 = openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) r1 = accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) (async) sendmsg$nl_route(r1, &(0x7f00000000c0)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x200}, 0xc, &(0x7f0000000080)={&(0x7f0000000040)=@ipv4_getrule={0x1c, 0x22, 0x200, 0x70bd28, 0x25dfdbfc, {0x2, 0x0, 0x10, 0x1f, 0x9, 0x0, 0x0, 0x2, 0x1}, [""]}, 0x1c}, 0x1, 0x0, 0x0, 0x4000800}, 0x400) r2 = syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r3 = socket$netlink(0x10, 0x3, 0x0) sendmsg$SOCK_DIAG_BY_FAMILY(r1, &(0x7f00000005c0)={&(0x7f0000000540)={0x10, 0x0, 0x0, 0x80}, 0xc, &(0x7f0000000580)={&(0x7f00000017c0)={0x13f4, 0x14, 0x0, 0x70bd2c, 0x25dfdbfe, {0x22, 0x3f}, [@INET_DIAG_REQ_BYTECODE={0x1004, 0x1, 
"1e6ec9f7b6bc3ec9d617afe6cd0594d5b0f4326dcfd76b66b67d0b31cd9d220f73cc4acbcd9275a9429048fb746b00e22bd5aeef5d39702a327cbe87f000d6cfe52036eba14d72af982451c36f607484eb6cf26e49ba40c80d970c3f78ddb079c787471a71de61383e0bb215d9ce3201bafdb9833bbea97664eff9c1bccbdfb44eac15187ed515b64f36852feb2e52229aca82801383b04ae690f2a2f58d058058f3795f5765c3dfd93654241a4bcf5debc64cae2253dccce6b8bb90264aeed8f7a19c7afa0269e90ca9b14b1657124182b702c9e40e1f6c0298b48c59c3c7b8096ca658756f58f8b9a55d424446cc667ff5941877ddb70b875088297c4fb5ca6932e02ec84618b9c2a449a0f57ffd1847054570680d926074f09dcc8e0d8287b858e09066bcf10f320f3199fd77c565c9a9582c72e1e7cf92838ba999ad6275b3eff6b7f1b5a8992838d673ac6cd05015b95fbcbadd72cc23a508fd31a15b83ad3e5024a4c8a4e3e43d2aaed8511d4d49021e87038711a86e6032a8f3af5a4d131e46b6f5bea72e2a3b1046c461519614faff417be64e5f4b7ebb2eda466fd53d82ca353073da5c7241642c3d1d9bc7e76c8bcca498244e83b8e00675644673f09294dd0b054679ab88fc90c37c6814d3eb2c8df50c3ad6e637ed37910c3ce1701d548735b4f36b75c7966de2ab31f25f897eba8e63236b48bb9eef211cfe620609a75b2cfe507b29091358a20372670674928e0a5207461b0348e05f277bf31448a8c8adf022156df4b193994a654128137e3256cdb2011fd0189dec79cb95af212bae17a8e02d9b84f54dc0c0a986018adb6c670882b69e45251af32ae3eebfdfb01679f8a748e3d28cd503c6760c77dfac20fc0f2f5a60f06ff84cfb60911993ceec1979315439b65ba613f4918523c10da4f066e9daaae9c301469b3037b74be6a18fed95e7849d412b8f4fcb5b412ff013f0713be7f37e81dd09ee97624f2f87fcaa05d0625a35ceb9d4a7650171a915d09cb57caa68a54ab95ba330b7c0e803ab620ac28a0d6c66d965e32fd6abf39e670be5ffdafb676a8bfbe72b89e5d8e9d5205243fca29ddfb83bf179ea2952b8c9af09ca3633e59a39a290db6a08a3ee8c90733b13f0cd4f5e387269feab17118dd6cd5d25ad22810b5660026fadcf6bf4a2afc56ca67c135de3b8df4a8a433aa38cceb3677d23ae729c768b12b8e2b9a3b27fc2455be7e51a99f0422bf4055018230b591b39819f1be3864d2e64586f12dced1f616a91896d6f11612a3a72c040cd0049c3d7c460f554766a45741a79ff5998ff1212815d6f6e17eb46f01608a07f01b9b88c8c9b73ccebd3789a915494dc955420251d2d5a2e273be0656eb68a83bcae3fc38cdec2682ff5791fb7600b8e43e3a7e679eb0e1aad3945b2c02a7e91b16a0e08c99a411fd5b96d597528cedbe541711b5f93a8a997af13f671a58d6d44a0c6b582a4541d82656a7cbe30c4cff58276bf30755db60ba4af553530e399cb4a224ade04e06b7453d91bff9f4742c938d2ed439063b7885361f92b89fa58c51a0e27446c3579e21be654319f66a1dc49dd71907b50760fdd92874a70c3f6b9c5de48a3c7444115e457739ccdfe094620c610eb15386548c77542e38ac5e08eee3e32832f76e63ba6104c6621d7e8ae2ee448cd1121b6fdfc6bd05086e007db4f9d57389b509a5106aa25a02d8199264a9c8a19619f3943f74a5746b250a40bf0b410951679558fb85573345ad004b510e7f61f33f2b6f9b5193a2d5b5171781e8e90aa7a32a94be5f1919862b96b2798eeac4af42a91ea30fa9c4989193f17cd34ef9d3d9d9640faff437d6d50933ea7fafc833b78fc8284d614928ecc794ba0ea1c09e9d100e7c2659759e193ee5b55588eb0b76ac49b4bfecdb967aa2c41a3343a58ce8b4be312393ae200aa4c9bc1bcac07fda826f3cc44390cfb4e6f6c137dc20bca1ee05279e63575d7003731cdb5ab980200b7aa13906f852aa3cb676bcee992f76c9968b87444add82c75a6fb95bb45e1132ec68530548609ada69c16ac07f63fae974210b54d89f43960ca20aa843c523541dd7dfaaf76b9508df242831e2a34442586725ca3c7588e3ec36c5a469a02868bb6b9f3c7337428d14e78e8ac56158769203f2a33b42cb4e9708a3110ed1ae8499591d5089938e1bb25df257b47bdc1156463cb8bf1de65ae9337f3521cf76cc5d2205f6df908f10c427d6b86db45a4a8f1fba48d759d1d105df286630818047e3a2d7425c2d77344596a5d2129f2d63f7d08a474f587428f6eb2bf94cd007723faf608aae0706eeeec61763f505c1b2a8b5d490269d1ca44f2e3d2ad386881298501cf1b665970cc81531793b0786d750a842f900ecc6c6c033959117af5aef4edcab1a0cea830f290b423da287b5af549acda62f3ecab8af079873c8346b6bd75d49700bd4ceaf226a8694e4c81944a2534f0977d74c396aeb9526e3362998decfcb43f
2d8d391d2d029ede7610e9c131c70559e80b5dd6ff32c0b6bd78cc85658c0f7ab9953490390de169ab3db43a4f3fb676351bd20705c16a3edde6f7040cafbb113d4183709e48488868964dfb7d279393c7ca0da61d2a748bbacdfc9e5d0b36d1e9c60e952fec8330dd3ac24f632cbe0d877c439add4816a8165383c17078a5eefd0fc75a5da3a15cd16b97b71dfa3a86183004ad96e75efa7bc9589dae6272db91364a16e4dcffdc970a6968ecea9928fdf948cc21dae2c84b1a3d08c00951e8f7b8f9ad103873b1e006409107260494a178f4cbb44c602ad8d0a24ec28ee6e75d425c189e6d3c9207a22708375c0a0dd8be831cc0dc5301504d383e5db6a476c120402a027ca2e4659ffc0146614162185a02df18c2bee4bf893faf47621b8b6296b1faecab3ab7f56c100e951bd9986ef2f5feb1e3921ebce17f91b749ef2c3086a9c44988e09472220c7d776852826acc8047ddf98fe28bee2104b570efc608ef9b5b1f06d6cba141361cdd873eeafe9eaf7f1f60fc714ac68457e0393558456937687a02c35e66d768e8253b0396cd63be5b9150f6be533d3069d87c67338e093d462a026d39401f71499953f0e779ae47a8096b4008f59b88a49561105b00703ea1377ffb0bba225abb10657e47703a48685fcfcc41b258293b8810d1e5040f67e12d2b8cb086f3fa2dac0c9150b828864efb701b75fb88cbac0907787067498a0570d770047abec243c38c46b2432e8d44635b8d1cda1494e9cbf802100a494c92933b9de660197f885c21c50f780d20a27374e8befdab2985e22d1c97e459e51ca71a32df9f502ca5c6a8e06ca1f31c577d9cf714f70cb0f87a2ca88010a3e9ad4a7bd705f274b053cbbacad7d266c69a8de0bc93436dc69ca8ec01f780332ee98c00caa8afff00d39797b06a7417703d2ae541a1455ddc7597e04d41d851d35da3d9d95e55a138ae916b102ffd9bb05b709f8e27063cbece54d025d9ad874376948d9c1e42b7eaecd1546e98d8430b1cea4d27125e3ee4ca33b5537b573894a60b012c9dad49104635c296e91509bcfec88c25077ade91a9901ee72286033f6a2bfb4db27f64f1b897a22f756aaf15acdf529fbb1eb8c7824c15753d6b3cdb9d7cd70d6d221ba69b10d08ca389ff1500dff46675b60268411742082351d426be0fb006ef6024b461e2ec3af4556875a5f8fb1c832cd88f895b6a1fa28de11d1fd320815db4f40df65abc8fc1da1382026f0860978e3223f7b6e7462c6d8360b346c897b2ff537b5cb8828470ea7532dfcc1f1f86892e90b81d87c3e1f11ddcbac60b7ab4c351db54dee963b70d5898b15633bc644d77a5ee1f8032191f99f6cf61476a2f693d599b5693bb23775d0663d791fee7e4c60fcb2606acb40110ac851fadac8eeac7260906374c21de159326f601c07fdbdeb3e8c29df624f2524fe70136c56526eb282bfc800561d1cdab49d66b03424d08098719a8fe2fe5a9514c2bdb5d59dd7874d8e7ca5de678b3a43b6d4819bfc54b562eabe1b591b3f1e0422b6ba5ed68b130144da892c14cd722222f2a2601782cd1bd685a5da13de06de57c68ff9741ee1c5216a11c62d01a25817b603c5b7148ded9ef5ccc990a27a6163e0ceec0b10dba7f187cf6be3a8e25d4272e86945e29d95e89d41969c9d092d9fafffebfd7b5d35660fef465d40e745957bd1f71c073495580876a745af705e32d2e38afd8feca71a8e957047fe53384af1edbad9c30da0288578b3b57a6469363a239ade6e59aa762d67020cc724dd66a604ff1ebc73dbdea53792472b2723700d0601521148c4453d3ccf4c0a2aec0868f5ecca2e5ba7dfb2159b3e8154440d6728946f938f5dadb8740d773f1c53b9a53edbb97c143a78e3b46b41e0d59b03c99e6c608d5928cf141cacb9a70f0a9cf51731fccf95ac03235cb18c48b424e1d7cdda5666ae4696098cfc751538c4d371c241d4f097169688cdf879eacb8a15888ce5f7717e4eea97c0056991f28850aa482a5c2194fd2c2219f4893604d35842d1ee7b9128bbb4ac4c96fd23f758ad516822b97663f2ec180a18146f22d4779d81bd366ce6aa4110f484487ddf975eb8dc5c7e1fcf58158ef15c588df99794d46d0da00188c92840dc875b1898513b38228c3dc779db82f26b7613da82c13e2e5141242c61b1ba74d0b877c0b63c96abc34ae8cc602abc486ecec8db6721007f1bec70afb801010b645d111ef781cd1d5596e0d09031be0883c42e72076e4e946e1d883becb4adfda5d25c9b13350b1fd19c1a4bbbd542ae766bd1eea57f1df94b7990af425574038969de670d519b921be2aae70f1fe115fc8b571a2a0b82894750196c3e15933c573583b43e725ea714794abce4690d05f0eb3477a6254f1af92ea56f22d9c7b54e3eb20b6827d60aa2f97148bcff415a86606503f24f2f3d5aec41ab3b956d0dde2719c6c73a9a8b6348d1c44a75901dea9536c2df33882da4e76d49c7c0efd4fae5054a
eb3c0a8d3db2f04da7616679d76924543638012b1410079afcefeb7238b8c70b20183d10156d5868c1b9ca311c1657e82ee90f8a26c0dac5737aa951c8b6cd2c03b212ee29e5bf7e4fd09689bf29c775783dcbe0b834053a8b065f151a808b435f7e1443837cc9fc07bb59e58fecfd73ce9342012123ad531001128f7dc2660ccabde803e61597fc572207999eb7fb22bb1ab8b4368b862a34acc8652363ba636de3b0b16500dc3d96b6f957a79916595f424bed03850693d5b38d29a2aa923372331a564ce5ee4b1b260d2466da7776204a0fa3d509601f8f4308ce570b85adc8d5c0e17f2fdcd8b1c21e3af65811d540dcc317616aba4013350d60134437294b3fedd047f1090236e3e309d9741b1a2e438eec6bb95419095c1db300d48d1bbcd389795ddd966b9de8f7e58a5d27e9df97b5cecd5bda3f0302dacec56494378f7d91ebd694d09a1d98f9aaae2144b6b3a8868ffbd9f73847782163011a326fd6b365fe7ce5fb7f161df8ba9b3e6b0e148d9c6594b7993badc2202a9e07bdb3b0553f055c09f00d7ddc59815e369ddd2dcb3d03f056b5500d583d7bf6bfc6f2a478381e5b176c266be2b54c2941e5c1d1ac781eceed3cb8e244a49f29f434398e5a621d891dc0c27928520395848947b2903c65cfed9530909f29869e454184d7cbdc2b868ff06f3f7068f549ae15bfb94b5f0841989c1aee550e65607a1c5057af73d790ac17aeb9103afe4b57cbdd5e8a4ad6fefd603bdc0ea921389"}, @INET_DIAG_REQ_BYTECODE={0xe7, 0x1, "1e5ecf201184aa6941375d88f6adc10d4559f78d1f0fae6d60ca413ef246a2319179f1014a4a320b553fc27636b079b36c343eea85b0d6df48fb36c2ff2ce35c432ec279ea9ade208dc357e6897d7ee8a1e7470f85ef180317362398b1651ada1d2fd106ae567f34c40f5efec9cbfdf65d626df6388272e12381b62a19f40a966e9afd5053093d1fca5ce254f207068e0425051e72cbb07cfdca495353fe5f00b9b98d934743c2a348c2777f2ccc06b83312660d0c39ba7cfc5fb8143f5aaa4127b57d9ec62a838c83e8967fc9f807012a71cba250c07e58c690411739369fcc4192dc"}, @INET_DIAG_REQ_BYTECODE={0x64, 0x1, "e86c36b92938c775ca74ffd523649ed98dd9b4e1068d097ccdd387b61e9331ffbb011afbd4198233b45fc81541108fd9fa92391552bbd800422783aa41f35ed2e2e8964cbe7819483c1ef2359b21344099b2572a348d7d337dc56259c4e07476"}, @INET_DIAG_REQ_BYTECODE={0x1e, 0x1, "a918d86f530eab750f2dd69512b4c21413c67a4d00ee146e71a9"}, @INET_DIAG_REQ_BYTECODE={0xc5, 0x1, "06afdad2c236dc026c8a0e52d7f5a3eabb60ab6c0fcfc71c1dc437cd2165a15c09540e0e81c8d5206e306d82dfbf2966ed5d63dcf68052575a30e47b2a9c2cc5b94b334c48706a466c83c49db38480bd5b79d2133dead1d1d060663d8bf0f32aea47930b7d74490242f78058c1c74c7aecb2a13331617b1440ece4ec5e3201b88cba145e6b8777676d6ba32b051b5f4a115952d69c915a449df6222154dde5cdea075621dd91a9b6fc1553d34e96b996d2eb33ba52c01bd1652aba558dcecb4340"}, @INET_DIAG_REQ_BYTECODE={0xff, 0x1, "28496674a7ed1043fcf22409b60a84384b05e9bc56883fcee2fd6c098082234356f4fd2e80e0f8eb1b3908e7f980d7b915b7468a5f8bc075cf5a6dbd086d109c81ce3bea28a608355f5280a1dc1d0d69e858ac4c8588886f5756f09e95f2bfa6a76418fafacc4e48fe9f371788f81ba6f5ad3288540c5d3e1207d4a47362e598566f90537d2d8fb607e3e81c39fa9ff09e0b7be2c2677e82761bbee6c94bbf5db3f5511690567ffb8a998444e45e87b268cc9fdad44bc8cddb7e12b8bfa6fd12f8120b3538405d9101714548a8a5af4a1ab030e32358d221187e23bf60e3d54cd740fcaf3661af8827f5a085f5e70814353ae5abd39d479ebac144"}, @INET_DIAG_REQ_BYTECODE={0x77, 0x1, "97f56bd28ec36e9784d2cfed30f3e3c68831d3aa6017490b72bd98098c2ee8d62593c271388fcdd85fcb409037a1b67456affe487cb4116d4fed68af0ec2e849efb1d8c475de7c4ed09d9440b2ae04fb54b124817465804ea78f9e9311b82e9d28f15fa3afbd16ed8ad3b501eeb69f1a1deec5"}, @INET_DIAG_REQ_BYTECODE={0x1a, 0x1, "78a76a96a491b62384c79a08ce7151d88222a5617653"}, @INET_DIAG_REQ_BYTECODE={0x14, 0x1, "5a51bcb7bdbbdf70d7a15c2919cdbab8"}]}, 0x13f4}, 0x1, 0x0, 0x0, 0x800}, 0x8800) (async) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) (async) r4 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r5 = 
openat$cgroup_ro(r4, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r5, 0x40305828, &(0x7f0000001780)={0x2f, 0x1, 0x0, 0x5fffff}) (async) r6 = socket$inet_sctp(0x2, 0x1, 0x84) getsockopt$inet_sctp_SCTP_MAX_BURST(r6, 0x84, 0xd, &(0x7f0000000000)=@assoc_value, &(0x7f00000000c0)=0x8) ioctl$FS_IOC_RESVSP(r6, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) (async) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r5, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) sendmsg$NL80211_CMD_DEL_PMK(r0, &(0x7f0000000500)={&(0x7f00000002c0)={0x10, 0x0, 0x0, 0x2000}, 0xc, &(0x7f00000004c0)={&(0x7f0000000400)={0x38, r2, 0x10, 0x70bd2d, 0x25dfdbfd, {{}, {@void, @val={0xc, 0x99, {0x3d0f201f, 0x5f}}}}, [@NL80211_ATTR_MAC={0xa}, @NL80211_ATTR_MAC={0xa, 0x6, @broadcast}]}, 0x38}, 0x1, 0x0, 0x0, 0x4}, 0x20000094) write$tun(r5, 0x0, 0x0) (async) r7 = socket$nl_generic(0x10, 0x3, 0x10) (async) r8 = syz_genetlink_get_family_id$mptcp(&(0x7f0000000300), 0xffffffffffffffff) sendmsg$MPTCP_PM_CMD_DEL_ADDR(r7, &(0x7f0000000480)={0x0, 0x0, &(0x7f0000000440)={&(0x7f0000000340)={0x20, r8, 0x1, 0x0, 0x0, {}, [@MPTCP_PM_ATTR_ADDR={0xc, 0x1, 0x0, 0x1, [@MPTCP_PM_ADDR_ATTR_FAMILY={0x6}]}]}, 0x20}}, 0x0) (async) sendmsg$MPTCP_PM_CMD_GET_ADDR(r5, &(0x7f0000000200)={&(0x7f0000000100)={0x10, 0x0, 0x0, 0x4000}, 0xc, &(0x7f00000001c0)={&(0x7f0000000180)={0x2c, r8, 0x1, 0x70bd26, 0x25dfdbfb, {}, [@MPTCP_PM_ATTR_SUBFLOWS={0x8, 0x3, 0x7}, @MPTCP_PM_ATTR_SUBFLOWS={0x8, 0x3, 0x8}, @MPTCP_PM_ATTR_RCV_ADD_ADDRS={0x8, 0x2, 0x5}]}, 0x2c}, 0x1, 0x0, 0x0, 0x44000}, 0x8000) sendmsg$nl_route(r3, 0x0, 0x0) [ 2206.689972][ T8781] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2206.741884][ T8781] workqueue: Failed to create a rescuer kthread for wq "bond861": -EINTR [ 2206.843425][ T8821] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2206.892052][ T8796] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
10:42:23 executing program 5: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) writev(r0, &(0x7f0000000340)=[{&(0x7f0000000280)="043caa07f3a3f76d858ace87d769ded7550278376e8c4f6550d9108cbf5189deea33e10b8a7224c6107d53d01f904f58c8e83e60c4192c5a7fbd1a5f9c9fb63ca8dddee8d195355273926702d073752d80d8545e667b5c485a", 0x59}, {&(0x7f0000000100)="e48c29435b087869336f3e74363df6ea882508c8aa487ff564e800ccd724f50e3896c5d551b1a2ec6bb29187299fe2e06c11dfcb2075f9a839b34a4a6b4c2bec", 0x40}, {&(0x7f0000000400)="a318ece45becd6e47ff8375b816640bce6a9ff4d8e83ab791404d351c3ba957f2f3a0220176c17e899248cf03c8a908c4ade8de15d81e79b803df97f6fe5e817c2efde3befe034a7bc6d52a0c1caf88dc7d2077e40879876ce4aa8d6273615939a97526b8cb409d0c58f8075db16b3aed80bd44efb660be17d9db07a801a43b59a034e9998a5d60b072ab0fe5dd97a86e25568ab6d9d74c7af31171f38eaff0cb0fc24300b48212ec6608ef820fc964a28db4cd8d0526d550efac04bc60f4e4cd3c224cbe0ba45f28e0566930afbca59424e39854793c6e301fdc9cedeb00500db4af5f6c2004568aa4682fc656b1943bb63", 0xf2}, {&(0x7f0000000500)="a1a8c7128cf53b860b5aaa1ccee4ffe846420e0192cfed6fed4eeddf537179b0a83627c3868f3b78b60a61263a90957f69337b2ab0a123218ed0318185135c095b602df299adfc458eef4947ab34782ecdbae8436486efca9d9076f2316af18200a3014efba92706cc1748f7c5925bdaa3aa4516b6f1a6f22f4f1c23d2554620b90ad856298d3087e4519d78d14dd2d1dbb2401cfd330b19ac2fc26b2b4837023a585b846cdc0e007773719a6ad0cf3acb3cde81220e1909741522d75d5c49222f1d0ef6d8bd1b58070df04e658553cb734adbdb98c8482330ed25a1d53b5b3a6c09aa436715f3535c84a943", 0xec}], 0x4) accept(0xffffffffffffffff, 0x0, &(0x7f0000000080)) socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x0, 0x803, 0x0) syz_genetlink_get_family_id$mptcp(&(0x7f0000000000), r1) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r3, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r5, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0xfffffff0}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x3c}}, 0x0) 10:42:23 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) r1 = accept$packet(r0, &(0x7f0000000080)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @random}, &(0x7f00000000c0)=0x14) splice(r1, &(0x7f0000000100)=0x70, 0xffffffffffffffff, &(0x7f0000000180)=0x60, 0x1, 0x0) (async) pipe(&(0x7f0000000140)={0xffffffffffffffff}) close(r2) (async) ioctl$TUNGETVNETLE(r2, 0x800454dd, &(0x7f0000000000)) [ 2207.029167][ T8827] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2207.029826][ T8796] 8021q: adding VLAN 0 to HW filter on device bond1424 [ 2207.187501][ T8157] BUG: 
MAX_LOCKDEP_CHAIN_HLOCKS too low! [ 2207.193169][ T8157] turning off the locking correctness validator. [ 2207.199758][ T8157] CPU: 0 PID: 8157 Comm: kworker/u4:0 Not tainted 6.4.0-rc7-syzkaller-01944-g3674fbf0451d #0 [ 2207.209928][ T8157] Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 05/27/2023 [ 2207.220001][ T8157] Workqueue: bond1265 bond_resend_igmp_join_requests_delayed [ 2207.227454][ T8157] Call Trace: [ 2207.230739][ T8157] [ 2207.233677][ T8157] dump_stack_lvl+0xd9/0x150 [ 2207.238299][ T8157] __lock_acquire+0x434b/0x5f30 [ 2207.243192][ T8157] ? unwind_next_frame+0xdf3/0x1e30 [ 2207.248415][ T8157] ? lockdep_hardirqs_on_prepare+0x410/0x410 [ 2207.254426][ T8157] ? unwind_next_frame+0x331/0x1e30 [ 2207.259647][ T8157] ? ret_from_fork+0x1f/0x30 [ 2207.264311][ T8157] ? ret_from_fork+0x1f/0x30 [ 2207.269017][ T8157] lock_acquire+0x1b1/0x520 [ 2207.273548][ T8157] ? ref_tracker_alloc+0x1ee/0x580 [ 2207.278954][ T8157] ? lock_sync+0x190/0x190 [ 2207.283408][ T8157] ? ret_from_fork+0x1f/0x30 [ 2207.288029][ T8157] ? stack_trace_save+0x90/0xc0 [ 2207.292907][ T8157] ? _raw_spin_lock_irqsave+0x52/0x60 [ 2207.298304][ T8157] _raw_spin_lock_irqsave+0x3d/0x60 [ 2207.303527][ T8157] ? ref_tracker_alloc+0x1ee/0x580 [ 2207.308665][ T8157] ref_tracker_alloc+0x1ee/0x580 [ 2207.313634][ T8157] ? ref_tracker_free+0x820/0x820 [ 2207.319734][ T8157] ? dst_init+0xe5/0x590 [ 2207.324080][ T8157] ? dst_alloc+0xc3/0x1a0 [ 2207.329743][ T8157] ? ip6_dst_alloc+0x32/0xa0 [ 2207.334609][ T8157] ? icmp6_dst_alloc+0x71/0x670 [ 2207.339533][ T8157] ? mld_sendpack+0x5de/0xed0 [ 2207.344302][ T8157] ? mld_send_report+0x90/0x320 [ 2207.349186][ T8157] ? ipv6_mc_netdev_event+0x288/0x610 [ 2207.354595][ T8157] ? notifier_call_chain+0xb6/0x3c0 [ 2207.359824][ T8157] ? call_netdevice_notifiers_info+0xb9/0x130 [ 2207.365916][ T8157] ? call_netdevice_notifiers+0x7d/0xb0 [ 2207.371486][ T8157] ? bond_resend_igmp_join_requests_delayed+0x61/0x180 [ 2207.378367][ T8157] ? process_one_work+0x99a/0x15e0 [ 2207.383516][ T8157] ? worker_thread+0x67d/0x10c0 [ 2207.388398][ T8157] ? kthread+0x344/0x440 [ 2207.392667][ T8157] ? ret_from_fork+0x1f/0x30 [ 2207.397292][ T8157] ? find_held_lock+0x2d/0x110 [ 2207.402084][ T8157] dst_init+0xe5/0x590 [ 2207.406190][ T8157] dst_alloc+0xc3/0x1a0 [ 2207.410381][ T8157] ip6_dst_alloc+0x32/0xa0 [ 2207.414832][ T8157] icmp6_dst_alloc+0x71/0x670 [ 2207.419546][ T8157] ? icmpv6_flow_init+0x3d/0x280 [ 2207.424532][ T8157] mld_sendpack+0x5de/0xed0 [ 2207.429156][ T8157] ? nf_hook.constprop.0+0x5b0/0x5b0 [ 2207.434571][ T8157] mld_send_report+0x90/0x320 [ 2207.439277][ T8157] ipv6_mc_netdev_event+0x288/0x610 [ 2207.444590][ T8157] notifier_call_chain+0xb6/0x3c0 [ 2207.449659][ T8157] call_netdevice_notifiers_info+0xb9/0x130 [ 2207.455583][ T8157] call_netdevice_notifiers+0x7d/0xb0 [ 2207.461072][ T8157] ? netdev_state_change+0x130/0x130 [ 2207.466402][ T8157] bond_resend_igmp_join_requests_delayed+0x61/0x180 [ 2207.473170][ T8157] process_one_work+0x99a/0x15e0 [ 2207.478156][ T8157] ? pwq_dec_nr_in_flight+0x2a0/0x2a0 [ 2207.483568][ T8157] ? 
spin_bug+0x1c0/0x1c0 10:42:23 executing program 0: r0 = bpf$BPF_PROG_WITH_BTFID_LOAD(0x5, &(0x7f0000000240)=@bpf_ext={0x1c, 0xa, &(0x7f0000000000)=@raw=[@map_val={0x18, 0x9, 0x2, 0x0, 0xffffffffffffffff, 0x0, 0x0, 0x0, 0x4}, @initr0={0x18, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x44cb}, @map_fd={0x18, 0x7, 0x1, 0x0, 0x1}, @map_val={0x18, 0xd, 0x2, 0x0, 0x1, 0x0, 0x0, 0x0, 0xa2}, @func={0x85, 0x0, 0x1, 0x0, 0xfffffffffffffffa}, @exit], &(0x7f0000000080)='GPL\x00', 0xfffff67b, 0x72, &(0x7f00000000c0)=""/114, 0x41000, 0x12, '\x00', 0x0, 0x0, 0xffffffffffffffff, 0x8, &(0x7f0000000180)={0x6, 0x3}, 0x8, 0x10, &(0x7f00000001c0)={0x1, 0x9, 0x9, 0x7ff}, 0x10, 0x28e69, 0xffffffffffffffff, 0x0, &(0x7f0000000200)=[0xffffffffffffffff]}, 0x80) write$binfmt_misc(r0, &(0x7f00000002c0)={'syz1'}, 0x4) mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r1 = openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r2 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r2, 0x0, 0x0) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) r4 = socket$nl_generic(0x10, 0x3, 0x10) sendfile(r4, r3, 0x0, 0x10000a006) getsockopt$inet6_IPV6_IPSEC_POLICY(r3, 0x29, 0x22, &(0x7f00000005c0)={{{@in6=@dev, @in=@private, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}}, {{@in=@empty}, 0x0, @in6=@dev}}, &(0x7f00000006c0)=0xe8) sendmsg$nl_generic(0xffffffffffffffff, &(0x7f0000000980)={&(0x7f0000000580)={0x10, 0x0, 0x0, 0x4000000}, 0xc, &(0x7f0000000940)={&(0x7f0000000700)={0x238, 0x21, 0x400, 0x70bd2b, 0x25dfdbfb, {0x2}, [@nested={0x9e, 0x8d, 0x0, 0x1, [@generic="dfcbc232952c8b1529afd9e9ffafbc538a8157d5ab9f22ffa3fd2f63ffe4acec99b86866b5cc36f6fb2df7ca55d190128bcf32c3a8f87426debb95bd7b9624139ebaa657cfc92930d2c45575060788ba1d24586abd626b73c0580c00c5512ac6db548b60e98cc93efb9ff948a8338c2b922dc35871164e48d14f4bec990d6b1abb7b6f12ad5f55e3933c0c3290fb4dae3ef739cbc138ace2c96e"]}, @nested={0x181, 0x84, 0x0, 0x1, [@generic="d8e63a536da3c5bb0086b9c4af4736f4b84bd7f60d33c865cefb57d21664dcee43d03fbeb8a9e99c856aa9787516e41f", @generic="9e80cb85750c013c961ab48c30ee618cdaeee0766e2a", @generic="9adae4c44c4610a24ebbfdfe9d0fb6b1f00c744f5898dbaf43ed2882a6ffdf01b1a5df1f6c78299406b36a89a3ed109c81338eae23c777ff", @typed={0x8, 0x4e, 0x0, 0x0, @uid=r5}, @generic="ce7509ed1f7200415caa876e7df7fd3b5cf8f3fc110e8c4e410701b822b810262ac036ce137812fd1633dabb209194c2201a0b4d7a8a48423133771ed1d25c04f03912c4106e9e232087aedb8088207b27f1694cc72f256c9f47281c1fcef7ae721efef74d4df66e6fb38698ba10720c9d711dbf3358e3994d821fb3adc4220d45d11a809643695936707e6b9f3edaeef0ff6f559d97fb01c2abd19565f69b91d66bd16fda896b5291c09c925ee120556fd4c18617b13915fce990bcccf04f6e08a408db6bff83b19595abaf64495e00a3a2d375e42502102e994c7d95cba54e0604f1f178dc35215fd7c8dfb783f2", @typed={0x8, 0xa, 0x0, 0x0, @ipv4=@loopback}]}]}, 0x238}, 0x1, 0x0, 0x0, 0x4000}, 0x4008081) ioctl$TUNSETOWNER(r1, 0x400454cc, r5) 10:42:23 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r1 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r2 = openat$cgroup_ro(r1, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r2, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) 
ioctl$FS_IOC_RESVSP(r2, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r2, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) write$tun(r2, 0x0, 0x0) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) r4 = syz_genetlink_get_family_id$tipc2(&(0x7f0000001400), r3) sendmsg$TIPC_NL_NODE_GET(r3, &(0x7f0000001540)={&(0x7f00000013c0)={0x10, 0x0, 0x0, 0x80000000}, 0xc, &(0x7f0000001500)={&(0x7f0000001440)={0x88, r4, 0x400, 0x70bd2b, 0x25dfdbfd, {}, [@TIPC_NLA_SOCK={0x60, 0x2, 0x0, 0x1, [@TIPC_NLA_SOCK_ADDR={0x8, 0x1, 0xff}, @TIPC_NLA_SOCK_CON={0x4}, @TIPC_NLA_SOCK_REF={0x8, 0x2, 0x7fff}, @TIPC_NLA_SOCK_HAS_PUBL={0x4}, @TIPC_NLA_SOCK_ADDR={0x8, 0x1, 0x5}, @TIPC_NLA_SOCK_CON={0x2c, 0x3, 0x0, 0x1, [@TIPC_NLA_CON_FLAG={0x8, 0x1, 0x1}, @TIPC_NLA_CON_NODE={0x8, 0x2, 0xffff8d5b}, @TIPC_NLA_CON_NODE={0x8, 0x2, 0x3}, @TIPC_NLA_CON_FLAG={0x8, 0x1, 0x9}, @TIPC_NLA_CON_FLAG={0x8, 0x1, 0x4}]}, @TIPC_NLA_SOCK_REF={0x8}, @TIPC_NLA_SOCK_HAS_PUBL={0x4}, @TIPC_NLA_SOCK_HAS_PUBL={0x4}]}, @TIPC_NLA_MON={0x14, 0x9, 0x0, 0x1, [@TIPC_NLA_MON_REF={0x8, 0x2, 0x8}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0xdf}]}]}, 0x88}, 0x1, 0x0, 0x0, 0x4}, 0x1) sendmsg$TIPC_NL_BEARER_ADD(r2, &(0x7f00000002c0)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x4000}, 0xc, &(0x7f0000000280)={&(0x7f00000000c0)={0x1bc, r4, 0x4, 0x70bd2c, 0x25dfdbfe, {}, [@TIPC_NLA_MEDIA={0x7c, 0x5, 0x0, 0x1, [@TIPC_NLA_MEDIA_NAME={0x8, 0x1, 'eth\x00'}, @TIPC_NLA_MEDIA_PROP={0x44, 0x2, 0x0, 0x1, [@TIPC_NLA_PROP_WIN={0x8, 0x3, 0x1}, @TIPC_NLA_PROP_WIN={0x8, 0x3, 0x9}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x8000}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x7}, @TIPC_NLA_PROP_PRIO={0x8, 0x1, 0xf}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x1}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x6}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x1f}]}, @TIPC_NLA_MEDIA_PROP={0x4}, @TIPC_NLA_MEDIA_PROP={0x24, 0x2, 0x0, 0x1, [@TIPC_NLA_PROP_TOL={0x8, 0x2, 0x8}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x100}, @TIPC_NLA_PROP_WIN={0x8, 0x3, 0x4}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x8001}]}, @TIPC_NLA_MEDIA_PROP={0x4}]}, @TIPC_NLA_MEDIA={0xa0, 0x5, 0x0, 0x1, [@TIPC_NLA_MEDIA_PROP={0x3c, 0x2, 0x0, 0x1, [@TIPC_NLA_PROP_MTU={0x8, 0x4, 0x5}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x1}, @TIPC_NLA_PROP_PRIO={0x8, 0x1, 0x1}, @TIPC_NLA_PROP_PRIO={0x8, 0x1, 0x1f}, @TIPC_NLA_PROP_WIN={0x8, 0x3, 0x4}, @TIPC_NLA_PROP_TOL={0x8}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x8001}]}, @TIPC_NLA_MEDIA_NAME={0x7, 0x1, 'ib\x00'}, @TIPC_NLA_MEDIA_PROP={0x1c, 0x2, 0x0, 0x1, [@TIPC_NLA_PROP_MTU={0x8, 0x4, 0x4}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x5}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x4}]}, @TIPC_NLA_MEDIA_PROP={0x34, 0x2, 0x0, 0x1, [@TIPC_NLA_PROP_MTU={0x8, 0x4, 0x19e8bf94}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x3}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x1}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x7}, @TIPC_NLA_PROP_PRIO={0x8, 0x1, 0xa}, @TIPC_NLA_PROP_WIN={0x8, 0x3, 0xaf61}]}, @TIPC_NLA_MEDIA_NAME={0x8, 0x1, 'udp\x00'}]}, @TIPC_NLA_NET={0x10, 0x7, 0x0, 0x1, [@TIPC_NLA_NET_NODEID={0xc, 0x3, 0x4}]}, 
@TIPC_NLA_SOCK={0x28, 0x2, 0x0, 0x1, [@TIPC_NLA_SOCK_CON={0x24, 0x3, 0x0, 0x1, [@TIPC_NLA_CON_NODE={0x8, 0x2, 0x2}, @TIPC_NLA_CON_NODE={0x8, 0x2, 0xffff}, @TIPC_NLA_CON_FLAG={0x8, 0x1, 0x7f}, @TIPC_NLA_CON_NODE={0x8, 0x2, 0x1f}]}]}, @TIPC_NLA_NET={0x54, 0x7, 0x0, 0x1, [@TIPC_NLA_NET_NODEID={0xc, 0x3, 0x9}, @TIPC_NLA_NET_ID={0x8, 0x1, 0x80}, @TIPC_NLA_NET_ID={0x8, 0x1, 0x2}, @TIPC_NLA_NET_NODEID={0xc, 0x3, 0x8fe5}, @TIPC_NLA_NET_ID={0x8, 0x1, 0x3}, @TIPC_NLA_NET_ID={0x8, 0x1, 0x8}, @TIPC_NLA_NET_NODEID={0xc, 0x3, 0xe68}, @TIPC_NLA_NET_NODEID={0xc, 0x3, 0x1}]}]}, 0x1bc}, 0x1, 0x0, 0x0, 0x8004}, 0x800) openat$cgroup_ro(r0, &(0x7f0000000000)='cpuset.effective_mems\x00', 0x0, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2207.487928][ T8157] ? _raw_spin_lock_irq+0x45/0x50 [ 2207.492992][ T8157] worker_thread+0x67d/0x10c0 [ 2207.497709][ T8157] ? process_one_work+0x15e0/0x15e0 [ 2207.502949][ T8157] kthread+0x344/0x440 [ 2207.507048][ T8157] ? kthread_complete_and_exit+0x40/0x40 [ 2207.512708][ T8157] ret_from_fork+0x1f/0x30 [ 2207.517164][ T8157] [ 2207.559718][ T8803] bridge1285: entered promiscuous mode [ 2207.579058][ T8837] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2207.590764][ T8803] bridge1285: entered allmulticast mode [ 2207.749166][ T8803] bond1424: (slave bridge1285): making interface the new active one [ 2207.781062][ T8803] bond1424: (slave bridge1285): Enslaving as an active interface with an up link 10:42:23 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xffffffa1}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:42:23 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffc03) 10:42:23 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) r1 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r2 = openat$cgroup_ro(r1, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r2, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) ioctl$FS_IOC_RESVSP(r2, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r2, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
'\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) write$tun(r2, 0x0, 0x0) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) r4 = syz_genetlink_get_family_id$tipc2(&(0x7f0000001400), r3) sendmsg$TIPC_NL_NODE_GET(r3, &(0x7f0000001540)={&(0x7f00000013c0)={0x10, 0x0, 0x0, 0x80000000}, 0xc, &(0x7f0000001500)={&(0x7f0000001440)={0x88, r4, 0x400, 0x70bd2b, 0x25dfdbfd, {}, [@TIPC_NLA_SOCK={0x60, 0x2, 0x0, 0x1, [@TIPC_NLA_SOCK_ADDR={0x8, 0x1, 0xff}, @TIPC_NLA_SOCK_CON={0x4}, @TIPC_NLA_SOCK_REF={0x8, 0x2, 0x7fff}, @TIPC_NLA_SOCK_HAS_PUBL={0x4}, @TIPC_NLA_SOCK_ADDR={0x8, 0x1, 0x5}, @TIPC_NLA_SOCK_CON={0x2c, 0x3, 0x0, 0x1, [@TIPC_NLA_CON_FLAG={0x8, 0x1, 0x1}, @TIPC_NLA_CON_NODE={0x8, 0x2, 0xffff8d5b}, @TIPC_NLA_CON_NODE={0x8, 0x2, 0x3}, @TIPC_NLA_CON_FLAG={0x8, 0x1, 0x9}, @TIPC_NLA_CON_FLAG={0x8, 0x1, 0x4}]}, @TIPC_NLA_SOCK_REF={0x8}, @TIPC_NLA_SOCK_HAS_PUBL={0x4}, @TIPC_NLA_SOCK_HAS_PUBL={0x4}]}, @TIPC_NLA_MON={0x14, 0x9, 0x0, 0x1, [@TIPC_NLA_MON_REF={0x8, 0x2, 0x8}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0xdf}]}]}, 0x88}, 0x1, 0x0, 0x0, 0x4}, 0x1) sendmsg$TIPC_NL_BEARER_ADD(r2, &(0x7f00000002c0)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x4000}, 0xc, &(0x7f0000000280)={&(0x7f00000000c0)={0x1bc, r4, 0x4, 0x70bd2c, 0x25dfdbfe, {}, [@TIPC_NLA_MEDIA={0x7c, 0x5, 0x0, 0x1, [@TIPC_NLA_MEDIA_NAME={0x8, 0x1, 'eth\x00'}, @TIPC_NLA_MEDIA_PROP={0x44, 0x2, 0x0, 0x1, [@TIPC_NLA_PROP_WIN={0x8, 0x3, 0x1}, @TIPC_NLA_PROP_WIN={0x8, 0x3, 0x9}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x8000}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x7}, @TIPC_NLA_PROP_PRIO={0x8, 0x1, 0xf}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x1}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x6}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x1f}]}, @TIPC_NLA_MEDIA_PROP={0x4}, @TIPC_NLA_MEDIA_PROP={0x24, 0x2, 0x0, 0x1, [@TIPC_NLA_PROP_TOL={0x8, 0x2, 0x8}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x100}, @TIPC_NLA_PROP_WIN={0x8, 0x3, 0x4}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x8001}]}, @TIPC_NLA_MEDIA_PROP={0x4}]}, @TIPC_NLA_MEDIA={0xa0, 0x5, 0x0, 0x1, [@TIPC_NLA_MEDIA_PROP={0x3c, 0x2, 0x0, 0x1, [@TIPC_NLA_PROP_MTU={0x8, 0x4, 0x5}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x1}, @TIPC_NLA_PROP_PRIO={0x8, 0x1, 0x1}, @TIPC_NLA_PROP_PRIO={0x8, 0x1, 0x1f}, @TIPC_NLA_PROP_WIN={0x8, 0x3, 0x4}, @TIPC_NLA_PROP_TOL={0x8}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x8001}]}, @TIPC_NLA_MEDIA_NAME={0x7, 0x1, 'ib\x00'}, @TIPC_NLA_MEDIA_PROP={0x1c, 0x2, 0x0, 0x1, [@TIPC_NLA_PROP_MTU={0x8, 0x4, 0x4}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x5}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x4}]}, @TIPC_NLA_MEDIA_PROP={0x34, 0x2, 0x0, 0x1, [@TIPC_NLA_PROP_MTU={0x8, 0x4, 0x19e8bf94}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x3}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x1}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x7}, @TIPC_NLA_PROP_PRIO={0x8, 0x1, 0xa}, @TIPC_NLA_PROP_WIN={0x8, 0x3, 0xaf61}]}, @TIPC_NLA_MEDIA_NAME={0x8, 0x1, 'udp\x00'}]}, @TIPC_NLA_NET={0x10, 0x7, 0x0, 0x1, [@TIPC_NLA_NET_NODEID={0xc, 0x3, 0x4}]}, @TIPC_NLA_SOCK={0x28, 0x2, 0x0, 0x1, [@TIPC_NLA_SOCK_CON={0x24, 0x3, 0x0, 0x1, [@TIPC_NLA_CON_NODE={0x8, 0x2, 0x2}, @TIPC_NLA_CON_NODE={0x8, 0x2, 0xffff}, @TIPC_NLA_CON_FLAG={0x8, 0x1, 0x7f}, @TIPC_NLA_CON_NODE={0x8, 0x2, 0x1f}]}]}, @TIPC_NLA_NET={0x54, 0x7, 0x0, 0x1, [@TIPC_NLA_NET_NODEID={0xc, 0x3, 0x9}, @TIPC_NLA_NET_ID={0x8, 0x1, 0x80}, @TIPC_NLA_NET_ID={0x8, 0x1, 0x2}, @TIPC_NLA_NET_NODEID={0xc, 0x3, 0x8fe5}, @TIPC_NLA_NET_ID={0x8, 0x1, 0x3}, @TIPC_NLA_NET_ID={0x8, 0x1, 0x8}, @TIPC_NLA_NET_NODEID={0xc, 0x3, 0xe68}, @TIPC_NLA_NET_NODEID={0xc, 0x3, 0x1}]}]}, 0x1bc}, 0x1, 0x0, 0x0, 
0x8004}, 0x800) openat$cgroup_ro(r0, &(0x7f0000000000)='cpuset.effective_mems\x00', 0x0, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) (async) openat$cgroup_ro(r1, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) (async) ioctl$FS_IOC_RESVSP(r2, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) (async) ioctl$FS_IOC_RESVSP(r2, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) (async) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r2, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) (async) write$tun(r2, 0x0, 0x0) (async) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) (async) syz_genetlink_get_family_id$tipc2(&(0x7f0000001400), r3) (async) sendmsg$TIPC_NL_NODE_GET(r3, &(0x7f0000001540)={&(0x7f00000013c0)={0x10, 0x0, 0x0, 0x80000000}, 0xc, &(0x7f0000001500)={&(0x7f0000001440)={0x88, r4, 0x400, 0x70bd2b, 0x25dfdbfd, {}, [@TIPC_NLA_SOCK={0x60, 0x2, 0x0, 0x1, [@TIPC_NLA_SOCK_ADDR={0x8, 0x1, 0xff}, @TIPC_NLA_SOCK_CON={0x4}, @TIPC_NLA_SOCK_REF={0x8, 0x2, 0x7fff}, @TIPC_NLA_SOCK_HAS_PUBL={0x4}, @TIPC_NLA_SOCK_ADDR={0x8, 0x1, 0x5}, @TIPC_NLA_SOCK_CON={0x2c, 0x3, 0x0, 0x1, [@TIPC_NLA_CON_FLAG={0x8, 0x1, 0x1}, @TIPC_NLA_CON_NODE={0x8, 0x2, 0xffff8d5b}, @TIPC_NLA_CON_NODE={0x8, 0x2, 0x3}, @TIPC_NLA_CON_FLAG={0x8, 0x1, 0x9}, @TIPC_NLA_CON_FLAG={0x8, 0x1, 0x4}]}, @TIPC_NLA_SOCK_REF={0x8}, @TIPC_NLA_SOCK_HAS_PUBL={0x4}, @TIPC_NLA_SOCK_HAS_PUBL={0x4}]}, @TIPC_NLA_MON={0x14, 0x9, 0x0, 0x1, [@TIPC_NLA_MON_REF={0x8, 0x2, 0x8}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0xdf}]}]}, 0x88}, 0x1, 0x0, 0x0, 0x4}, 0x1) (async) sendmsg$TIPC_NL_BEARER_ADD(r2, &(0x7f00000002c0)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x4000}, 0xc, &(0x7f0000000280)={&(0x7f00000000c0)={0x1bc, r4, 0x4, 0x70bd2c, 0x25dfdbfe, {}, [@TIPC_NLA_MEDIA={0x7c, 0x5, 0x0, 0x1, [@TIPC_NLA_MEDIA_NAME={0x8, 0x1, 'eth\x00'}, @TIPC_NLA_MEDIA_PROP={0x44, 0x2, 0x0, 0x1, [@TIPC_NLA_PROP_WIN={0x8, 0x3, 0x1}, @TIPC_NLA_PROP_WIN={0x8, 0x3, 0x9}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x8000}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x7}, @TIPC_NLA_PROP_PRIO={0x8, 0x1, 0xf}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x1}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x6}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x1f}]}, @TIPC_NLA_MEDIA_PROP={0x4}, @TIPC_NLA_MEDIA_PROP={0x24, 0x2, 0x0, 0x1, [@TIPC_NLA_PROP_TOL={0x8, 0x2, 0x8}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x100}, @TIPC_NLA_PROP_WIN={0x8, 0x3, 0x4}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x8001}]}, @TIPC_NLA_MEDIA_PROP={0x4}]}, @TIPC_NLA_MEDIA={0xa0, 0x5, 0x0, 0x1, [@TIPC_NLA_MEDIA_PROP={0x3c, 0x2, 0x0, 0x1, [@TIPC_NLA_PROP_MTU={0x8, 0x4, 0x5}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x1}, @TIPC_NLA_PROP_PRIO={0x8, 0x1, 0x1}, @TIPC_NLA_PROP_PRIO={0x8, 0x1, 0x1f}, @TIPC_NLA_PROP_WIN={0x8, 0x3, 0x4}, @TIPC_NLA_PROP_TOL={0x8}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x8001}]}, @TIPC_NLA_MEDIA_NAME={0x7, 0x1, 
'ib\x00'}, @TIPC_NLA_MEDIA_PROP={0x1c, 0x2, 0x0, 0x1, [@TIPC_NLA_PROP_MTU={0x8, 0x4, 0x4}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x5}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x4}]}, @TIPC_NLA_MEDIA_PROP={0x34, 0x2, 0x0, 0x1, [@TIPC_NLA_PROP_MTU={0x8, 0x4, 0x19e8bf94}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x3}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x1}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x7}, @TIPC_NLA_PROP_PRIO={0x8, 0x1, 0xa}, @TIPC_NLA_PROP_WIN={0x8, 0x3, 0xaf61}]}, @TIPC_NLA_MEDIA_NAME={0x8, 0x1, 'udp\x00'}]}, @TIPC_NLA_NET={0x10, 0x7, 0x0, 0x1, [@TIPC_NLA_NET_NODEID={0xc, 0x3, 0x4}]}, @TIPC_NLA_SOCK={0x28, 0x2, 0x0, 0x1, [@TIPC_NLA_SOCK_CON={0x24, 0x3, 0x0, 0x1, [@TIPC_NLA_CON_NODE={0x8, 0x2, 0x2}, @TIPC_NLA_CON_NODE={0x8, 0x2, 0xffff}, @TIPC_NLA_CON_FLAG={0x8, 0x1, 0x7f}, @TIPC_NLA_CON_NODE={0x8, 0x2, 0x1f}]}]}, @TIPC_NLA_NET={0x54, 0x7, 0x0, 0x1, [@TIPC_NLA_NET_NODEID={0xc, 0x3, 0x9}, @TIPC_NLA_NET_ID={0x8, 0x1, 0x80}, @TIPC_NLA_NET_ID={0x8, 0x1, 0x2}, @TIPC_NLA_NET_NODEID={0xc, 0x3, 0x8fe5}, @TIPC_NLA_NET_ID={0x8, 0x1, 0x3}, @TIPC_NLA_NET_ID={0x8, 0x1, 0x8}, @TIPC_NLA_NET_NODEID={0xc, 0x3, 0xe68}, @TIPC_NLA_NET_NODEID={0xc, 0x3, 0x1}]}]}, 0x1bc}, 0x1, 0x0, 0x0, 0x8004}, 0x800) (async) openat$cgroup_ro(r0, &(0x7f0000000000)='cpuset.effective_mems\x00', 0x0, 0x0) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) 10:42:23 executing program 0: r0 = bpf$BPF_PROG_WITH_BTFID_LOAD(0x5, &(0x7f0000000240)=@bpf_ext={0x1c, 0xa, &(0x7f0000000000)=@raw=[@map_val={0x18, 0x9, 0x2, 0x0, 0xffffffffffffffff, 0x0, 0x0, 0x0, 0x4}, @initr0={0x18, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x44cb}, @map_fd={0x18, 0x7, 0x1, 0x0, 0x1}, @map_val={0x18, 0xd, 0x2, 0x0, 0x1, 0x0, 0x0, 0x0, 0xa2}, @func={0x85, 0x0, 0x1, 0x0, 0xfffffffffffffffa}, @exit], &(0x7f0000000080)='GPL\x00', 0xfffff67b, 0x72, &(0x7f00000000c0)=""/114, 0x41000, 0x12, '\x00', 0x0, 0x0, 0xffffffffffffffff, 0x8, &(0x7f0000000180)={0x6, 0x3}, 0x8, 0x10, &(0x7f00000001c0)={0x1, 0x9, 0x9, 0x7ff}, 0x10, 0x28e69, 0xffffffffffffffff, 0x0, &(0x7f0000000200)=[0xffffffffffffffff]}, 0x80) write$binfmt_misc(r0, &(0x7f00000002c0)={'syz1'}, 0x4) mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) (async) r1 = openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) (async) accept(0xffffffffffffffff, 0x0, 0x0) (async) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) (async) r2 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) (async) sendmsg$nl_route(r2, 0x0, 0x0) (async) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) (async) r4 = socket$nl_generic(0x10, 0x3, 0x10) sendfile(r4, r3, 0x0, 0x10000a006) (async) getsockopt$inet6_IPV6_IPSEC_POLICY(r3, 0x29, 0x22, &(0x7f00000005c0)={{{@in6=@dev, @in=@private, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}}, {{@in=@empty}, 0x0, @in6=@dev}}, &(0x7f00000006c0)=0xe8) sendmsg$nl_generic(0xffffffffffffffff, &(0x7f0000000980)={&(0x7f0000000580)={0x10, 0x0, 0x0, 0x4000000}, 0xc, &(0x7f0000000940)={&(0x7f0000000700)={0x238, 0x21, 0x400, 0x70bd2b, 0x25dfdbfb, {0x2}, [@nested={0x9e, 0x8d, 0x0, 0x1, 
[@generic="dfcbc232952c8b1529afd9e9ffafbc538a8157d5ab9f22ffa3fd2f63ffe4acec99b86866b5cc36f6fb2df7ca55d190128bcf32c3a8f87426debb95bd7b9624139ebaa657cfc92930d2c45575060788ba1d24586abd626b73c0580c00c5512ac6db548b60e98cc93efb9ff948a8338c2b922dc35871164e48d14f4bec990d6b1abb7b6f12ad5f55e3933c0c3290fb4dae3ef739cbc138ace2c96e"]}, @nested={0x181, 0x84, 0x0, 0x1, [@generic="d8e63a536da3c5bb0086b9c4af4736f4b84bd7f60d33c865cefb57d21664dcee43d03fbeb8a9e99c856aa9787516e41f", @generic="9e80cb85750c013c961ab48c30ee618cdaeee0766e2a", @generic="9adae4c44c4610a24ebbfdfe9d0fb6b1f00c744f5898dbaf43ed2882a6ffdf01b1a5df1f6c78299406b36a89a3ed109c81338eae23c777ff", @typed={0x8, 0x4e, 0x0, 0x0, @uid=r5}, @generic="ce7509ed1f7200415caa876e7df7fd3b5cf8f3fc110e8c4e410701b822b810262ac036ce137812fd1633dabb209194c2201a0b4d7a8a48423133771ed1d25c04f03912c4106e9e232087aedb8088207b27f1694cc72f256c9f47281c1fcef7ae721efef74d4df66e6fb38698ba10720c9d711dbf3358e3994d821fb3adc4220d45d11a809643695936707e6b9f3edaeef0ff6f559d97fb01c2abd19565f69b91d66bd16fda896b5291c09c925ee120556fd4c18617b13915fce990bcccf04f6e08a408db6bff83b19595abaf64495e00a3a2d375e42502102e994c7d95cba54e0604f1f178dc35215fd7c8dfb783f2", @typed={0x8, 0xa, 0x0, 0x0, @ipv4=@loopback}]}]}, 0x238}, 0x1, 0x0, 0x0, 0x4000}, 0x4008081) (async) ioctl$TUNSETOWNER(r1, 0x400454cc, r5) [ 2207.806825][ T8819] netlink: 'syz-executor.3': attribute type 1 has an invalid length. [ 2207.821240][ T8819] workqueue: Failed to create a rescuer kthread for wq "bond1462": -EINTR [ 2207.846510][ T8844] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further 10:42:23 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) r1 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r2 = openat$cgroup_ro(r1, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r2, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) (async) ioctl$FS_IOC_RESVSP(r2, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f0000000300)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r2, @ANYBLOB="0000000001000000183300000400000000000000000000004501400010000000184200000800000000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff0100"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) write$tun(r2, 0x0, 0x0) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) r4 = syz_genetlink_get_family_id$tipc2(&(0x7f0000001400), r3) sendmsg$TIPC_NL_NODE_GET(r3, &(0x7f0000001540)={&(0x7f00000013c0)={0x10, 0x0, 0x0, 0x80000000}, 0xc, &(0x7f0000001500)={&(0x7f0000001440)={0x88, r4, 0x400, 0x70bd2b, 0x25dfdbfd, {}, [@TIPC_NLA_SOCK={0x60, 0x2, 0x0, 0x1, [@TIPC_NLA_SOCK_ADDR={0x8, 0x1, 0xff}, @TIPC_NLA_SOCK_CON={0x4}, @TIPC_NLA_SOCK_REF={0x8, 0x2, 0x7fff}, @TIPC_NLA_SOCK_HAS_PUBL={0x4}, @TIPC_NLA_SOCK_ADDR={0x8, 0x1, 0x5}, @TIPC_NLA_SOCK_CON={0x2c, 0x3, 0x0, 0x1, [@TIPC_NLA_CON_FLAG={0x8, 0x1, 0x1}, @TIPC_NLA_CON_NODE={0x8, 0x2, 0xffff8d5b}, @TIPC_NLA_CON_NODE={0x8, 0x2, 0x3}, @TIPC_NLA_CON_FLAG={0x8, 0x1, 0x9}, @TIPC_NLA_CON_FLAG={0x8, 0x1, 0x4}]}, @TIPC_NLA_SOCK_REF={0x8}, 
@TIPC_NLA_SOCK_HAS_PUBL={0x4}, @TIPC_NLA_SOCK_HAS_PUBL={0x4}]}, @TIPC_NLA_MON={0x14, 0x9, 0x0, 0x1, [@TIPC_NLA_MON_REF={0x8, 0x2, 0x8}, @TIPC_NLA_MON_ACTIVATION_THRESHOLD={0x8, 0x1, 0xdf}]}]}, 0x88}, 0x1, 0x0, 0x0, 0x4}, 0x1) sendmsg$TIPC_NL_BEARER_ADD(r2, &(0x7f00000002c0)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x4000}, 0xc, &(0x7f0000000280)={&(0x7f00000000c0)={0x1bc, r4, 0x4, 0x70bd2c, 0x25dfdbfe, {}, [@TIPC_NLA_MEDIA={0x7c, 0x5, 0x0, 0x1, [@TIPC_NLA_MEDIA_NAME={0x8, 0x1, 'eth\x00'}, @TIPC_NLA_MEDIA_PROP={0x44, 0x2, 0x0, 0x1, [@TIPC_NLA_PROP_WIN={0x8, 0x3, 0x1}, @TIPC_NLA_PROP_WIN={0x8, 0x3, 0x9}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x8000}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x7}, @TIPC_NLA_PROP_PRIO={0x8, 0x1, 0xf}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x1}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x6}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x1f}]}, @TIPC_NLA_MEDIA_PROP={0x4}, @TIPC_NLA_MEDIA_PROP={0x24, 0x2, 0x0, 0x1, [@TIPC_NLA_PROP_TOL={0x8, 0x2, 0x8}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x100}, @TIPC_NLA_PROP_WIN={0x8, 0x3, 0x4}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x8001}]}, @TIPC_NLA_MEDIA_PROP={0x4}]}, @TIPC_NLA_MEDIA={0xa0, 0x5, 0x0, 0x1, [@TIPC_NLA_MEDIA_PROP={0x3c, 0x2, 0x0, 0x1, [@TIPC_NLA_PROP_MTU={0x8, 0x4, 0x5}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x1}, @TIPC_NLA_PROP_PRIO={0x8, 0x1, 0x1}, @TIPC_NLA_PROP_PRIO={0x8, 0x1, 0x1f}, @TIPC_NLA_PROP_WIN={0x8, 0x3, 0x4}, @TIPC_NLA_PROP_TOL={0x8}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x8001}]}, @TIPC_NLA_MEDIA_NAME={0x7, 0x1, 'ib\x00'}, @TIPC_NLA_MEDIA_PROP={0x1c, 0x2, 0x0, 0x1, [@TIPC_NLA_PROP_MTU={0x8, 0x4, 0x4}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x5}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x4}]}, @TIPC_NLA_MEDIA_PROP={0x34, 0x2, 0x0, 0x1, [@TIPC_NLA_PROP_MTU={0x8, 0x4, 0x19e8bf94}, @TIPC_NLA_PROP_TOL={0x8, 0x2, 0x3}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x1}, @TIPC_NLA_PROP_MTU={0x8, 0x4, 0x7}, @TIPC_NLA_PROP_PRIO={0x8, 0x1, 0xa}, @TIPC_NLA_PROP_WIN={0x8, 0x3, 0xaf61}]}, @TIPC_NLA_MEDIA_NAME={0x8, 0x1, 'udp\x00'}]}, @TIPC_NLA_NET={0x10, 0x7, 0x0, 0x1, [@TIPC_NLA_NET_NODEID={0xc, 0x3, 0x4}]}, @TIPC_NLA_SOCK={0x28, 0x2, 0x0, 0x1, [@TIPC_NLA_SOCK_CON={0x24, 0x3, 0x0, 0x1, [@TIPC_NLA_CON_NODE={0x8, 0x2, 0x2}, @TIPC_NLA_CON_NODE={0x8, 0x2, 0xffff}, @TIPC_NLA_CON_FLAG={0x8, 0x1, 0x7f}, @TIPC_NLA_CON_NODE={0x8, 0x2, 0x1f}]}]}, @TIPC_NLA_NET={0x54, 0x7, 0x0, 0x1, [@TIPC_NLA_NET_NODEID={0xc, 0x3, 0x9}, @TIPC_NLA_NET_ID={0x8, 0x1, 0x80}, @TIPC_NLA_NET_ID={0x8, 0x1, 0x2}, @TIPC_NLA_NET_NODEID={0xc, 0x3, 0x8fe5}, @TIPC_NLA_NET_ID={0x8, 0x1, 0x3}, @TIPC_NLA_NET_ID={0x8, 0x1, 0x8}, @TIPC_NLA_NET_NODEID={0xc, 0x3, 0xe68}, @TIPC_NLA_NET_NODEID={0xc, 0x3, 0x1}]}]}, 0x1bc}, 0x1, 0x0, 0x0, 0x8004}, 0x800) openat$cgroup_ro(r0, &(0x7f0000000000)='cpuset.effective_mems\x00', 0x0, 0x0) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 10:42:24 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0xc}, 
[@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:42:24 executing program 0: r0 = bpf$BPF_PROG_WITH_BTFID_LOAD(0x5, &(0x7f0000000240)=@bpf_ext={0x1c, 0xa, &(0x7f0000000000)=@raw=[@map_val={0x18, 0x9, 0x2, 0x0, 0xffffffffffffffff, 0x0, 0x0, 0x0, 0x4}, @initr0={0x18, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x44cb}, @map_fd={0x18, 0x7, 0x1, 0x0, 0x1}, @map_val={0x18, 0xd, 0x2, 0x0, 0x1, 0x0, 0x0, 0x0, 0xa2}, @func={0x85, 0x0, 0x1, 0x0, 0xfffffffffffffffa}, @exit], &(0x7f0000000080)='GPL\x00', 0xfffff67b, 0x72, &(0x7f00000000c0)=""/114, 0x41000, 0x12, '\x00', 0x0, 0x0, 0xffffffffffffffff, 0x8, &(0x7f0000000180)={0x6, 0x3}, 0x8, 0x10, &(0x7f00000001c0)={0x1, 0x9, 0x9, 0x7ff}, 0x10, 0x28e69, 0xffffffffffffffff, 0x0, &(0x7f0000000200)=[0xffffffffffffffff]}, 0x80) write$binfmt_misc(r0, &(0x7f00000002c0)={'syz1'}, 0x4) mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) (async) r1 = openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) (async) accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) (async) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) (async) r2 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r2, 0x0, 0x0) (async) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) (async) r4 = socket$nl_generic(0x10, 0x3, 0x10) sendfile(r4, r3, 0x0, 0x10000a006) getsockopt$inet6_IPV6_IPSEC_POLICY(r3, 0x29, 0x22, &(0x7f00000005c0)={{{@in6=@dev, @in=@private, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}}, {{@in=@empty}, 0x0, @in6=@dev}}, &(0x7f00000006c0)=0xe8) sendmsg$nl_generic(0xffffffffffffffff, &(0x7f0000000980)={&(0x7f0000000580)={0x10, 0x0, 0x0, 0x4000000}, 0xc, &(0x7f0000000940)={&(0x7f0000000700)={0x238, 0x21, 0x400, 0x70bd2b, 0x25dfdbfb, {0x2}, [@nested={0x9e, 0x8d, 0x0, 0x1, [@generic="dfcbc232952c8b1529afd9e9ffafbc538a8157d5ab9f22ffa3fd2f63ffe4acec99b86866b5cc36f6fb2df7ca55d190128bcf32c3a8f87426debb95bd7b9624139ebaa657cfc92930d2c45575060788ba1d24586abd626b73c0580c00c5512ac6db548b60e98cc93efb9ff948a8338c2b922dc35871164e48d14f4bec990d6b1abb7b6f12ad5f55e3933c0c3290fb4dae3ef739cbc138ace2c96e"]}, @nested={0x181, 0x84, 0x0, 0x1, [@generic="d8e63a536da3c5bb0086b9c4af4736f4b84bd7f60d33c865cefb57d21664dcee43d03fbeb8a9e99c856aa9787516e41f", @generic="9e80cb85750c013c961ab48c30ee618cdaeee0766e2a", @generic="9adae4c44c4610a24ebbfdfe9d0fb6b1f00c744f5898dbaf43ed2882a6ffdf01b1a5df1f6c78299406b36a89a3ed109c81338eae23c777ff", @typed={0x8, 0x4e, 0x0, 0x0, @uid=r5}, @generic="ce7509ed1f7200415caa876e7df7fd3b5cf8f3fc110e8c4e410701b822b810262ac036ce137812fd1633dabb209194c2201a0b4d7a8a48423133771ed1d25c04f03912c4106e9e232087aedb8088207b27f1694cc72f256c9f47281c1fcef7ae721efef74d4df66e6fb38698ba10720c9d711dbf3358e3994d821fb3adc4220d45d11a809643695936707e6b9f3edaeef0ff6f559d97fb01c2abd19565f69b91d66bd16fda896b5291c09c925ee120556fd4c18617b13915fce990bcccf04f6e08a408db6bff83b19595abaf64495e00a3a2d375e42502102e994c7d95cba54e0604f1f178dc35215fd7c8dfb783f2", @typed={0x8, 0xa, 0x0, 0x0, @ipv4=@loopback}]}]}, 0x238}, 0x1, 0x0, 0x0, 0x4000}, 0x4008081) (async) ioctl$TUNSETOWNER(r1, 0x400454cc, r5) 10:42:24 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffc03) 
openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffc03) (async) [ 2207.963887][ T8834] netlink: 'syz-executor.5': attribute type 1 has an invalid length. [ 2207.977064][ T8864] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further 10:42:24 executing program 5: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) writev(r0, &(0x7f0000000340)=[{&(0x7f0000000280)="043caa07f3a3f76d858ace87d769ded7550278376e8c4f6550d9108cbf5189deea33e10b8a7224c6107d53d01f904f58c8e83e60c4192c5a7fbd1a5f9c9fb63ca8dddee8d195355273926702d073752d80d8545e667b5c485a", 0x59}, {&(0x7f0000000100)="e48c29435b087869336f3e74363df6ea882508c8aa487ff564e800ccd724f50e3896c5d551b1a2ec6bb29187299fe2e06c11dfcb2075f9a839b34a4a6b4c2bec", 0x40}, {&(0x7f0000000400)="a318ece45becd6e47ff8375b816640bce6a9ff4d8e83ab791404d351c3ba957f2f3a0220176c17e899248cf03c8a908c4ade8de15d81e79b803df97f6fe5e817c2efde3befe034a7bc6d52a0c1caf88dc7d2077e40879876ce4aa8d6273615939a97526b8cb409d0c58f8075db16b3aed80bd44efb660be17d9db07a801a43b59a034e9998a5d60b072ab0fe5dd97a86e25568ab6d9d74c7af31171f38eaff0cb0fc24300b48212ec6608ef820fc964a28db4cd8d0526d550efac04bc60f4e4cd3c224cbe0ba45f28e0566930afbca59424e39854793c6e301fdc9cedeb00500db4af5f6c2004568aa4682fc656b1943bb63", 0xf2}, {&(0x7f0000000500)="a1a8c7128cf53b860b5aaa1ccee4ffe846420e0192cfed6fed4eeddf537179b0a83627c3868f3b78b60a61263a90957f69337b2ab0a123218ed0318185135c095b602df299adfc458eef4947ab34782ecdbae8436486efca9d9076f2316af18200a3014efba92706cc1748f7c5925bdaa3aa4516b6f1a6f22f4f1c23d2554620b90ad856298d3087e4519d78d14dd2d1dbb2401cfd330b19ac2fc26b2b4837023a585b846cdc0e007773719a6ad0cf3acb3cde81220e1909741522d75d5c49222f1d0ef6d8bd1b58070df04e658553cb734adbdb98c8482330ed25a1d53b5b3a6c09aa436715f3535c84a943", 0xec}], 0x4) accept(0xffffffffffffffff, 0x0, &(0x7f0000000080)) socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x0, 0x803, 0x0) syz_genetlink_get_family_id$mptcp(&(0x7f0000000000), r1) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r3, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r5, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0xfffffffe}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x3c}}, 0x0) 10:42:24 executing program 2: r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000)='./cgroup/syz1\x00', 0x200002, 0x0) ioctl$F2FS_IOC_MOVE_RANGE(r0, 0xc020f509, &(0x7f0000000080)={r0, 0x3, 0x4}) openat$cgroup_ro(r1, &(0x7f00000000c0)='cgroup.events\x00', 0x275a, 0x0) r2 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) 
openat$cgroup_ro(r2, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r2, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000100)='blkio.bfq.time_recursive\x00', 0x0, 0x0) 10:42:24 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffc03) [ 2207.981529][ T8834] workqueue: Failed to create a rescuer kthread for wq "bond861": -EINTR [ 2208.072269][ T8860] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 10:42:24 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) [ 2208.315705][ T8860] 8021q: adding VLAN 0 to HW filter on device bond1425 [ 2208.341819][ T8866] bridge1286: entered promiscuous mode [ 2208.355093][ T8866] bridge1286: entered allmulticast mode [ 2208.476035][ T8879] netlink: 'syz-executor.3': attribute type 1 has an invalid length. 10:42:24 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xffffffc3}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:42:24 executing program 2: r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000)='./cgroup/syz1\x00', 0x200002, 0x0) ioctl$F2FS_IOC_MOVE_RANGE(r0, 0xc020f509, &(0x7f0000000080)={r0, 0x3, 0x4}) openat$cgroup_ro(r1, &(0x7f00000000c0)='cgroup.events\x00', 0x275a, 0x0) (async) r2 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r2, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r2, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000100)='blkio.bfq.time_recursive\x00', 0x0, 0x0) 10:42:24 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) (async) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) 10:42:24 executing program 4: r0 
= openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) r1 = accept4$ax25(r0, &(0x7f0000000080)={{0x3, @rose}, [@remote, @null, @null, @bcast, @netrom, @netrom, @bcast, @default]}, &(0x7f0000000000)=0x48, 0x80000) sendfile(r1, r0, &(0x7f0000000100)=0x7, 0xffff) [ 2208.544064][ T8879] 8021q: adding VLAN 0 to HW filter on device bond1462 10:42:24 executing program 2: r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000)='./cgroup/syz1\x00', 0x200002, 0x0) ioctl$F2FS_IOC_MOVE_RANGE(r0, 0xc020f509, &(0x7f0000000080)={r0, 0x3, 0x4}) openat$cgroup_ro(r1, &(0x7f00000000c0)='cgroup.events\x00', 0x275a, 0x0) r2 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r2, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r2, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000100)='blkio.bfq.time_recursive\x00', 0x0, 0x0) openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000)='./cgroup/syz1\x00', 0x200002, 0x0) (async) ioctl$F2FS_IOC_MOVE_RANGE(r0, 0xc020f509, &(0x7f0000000080)={r0, 0x3, 0x4}) (async) openat$cgroup_ro(r1, &(0x7f00000000c0)='cgroup.events\x00', 0x275a, 0x0) (async) openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) (async) openat$cgroup_ro(r2, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r2, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000100)='blkio.bfq.time_recursive\x00', 0x0, 0x0) (async) 10:42:24 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0xe}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:42:24 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) r1 = accept4$ax25(r0, &(0x7f0000000080)={{0x3, @rose}, [@remote, @null, @null, @bcast, @netrom, @netrom, @bcast, @default]}, &(0x7f0000000000)=0x48, 0x80000) sendfile(r1, r0, &(0x7f0000000100)=0x7, 0xffff) [ 2208.799817][ T8888] bond1462: (slave bridge1357): making interface the new active one [ 2208.821144][ T8888] bond1462: (slave bridge1357): Enslaving as an active interface with an up link [ 2208.830476][ T8894] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
10:42:24 executing program 2: socketpair$nbd(0x1, 0x1, 0x0, &(0x7f0000000000)={0xffffffffffffffff}) writev(r0, &(0x7f00000000c0)=[{&(0x7f0000000080)="ae06075345463e841f95d887", 0xc}], 0x1) close(0xffffffffffffffff) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r1, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2208.935078][ T8894] 8021q: adding VLAN 0 to HW filter on device bond861 [ 2208.976702][ T8884] bond861: (slave bridge1016): making interface the new active one [ 2208.989258][ T8884] bond861: (slave bridge1016): Enslaving as an active interface with an up link 10:42:25 executing program 5: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) writev(r0, &(0x7f0000000340)=[{&(0x7f0000000280)="043caa07f3a3f76d858ace87d769ded7550278376e8c4f6550d9108cbf5189deea33e10b8a7224c6107d53d01f904f58c8e83e60c4192c5a7fbd1a5f9c9fb63ca8dddee8d195355273926702d073752d80d8545e667b5c485a", 0x59}, {&(0x7f0000000100)="e48c29435b087869336f3e74363df6ea882508c8aa487ff564e800ccd724f50e3896c5d551b1a2ec6bb29187299fe2e06c11dfcb2075f9a839b34a4a6b4c2bec", 0x40}, {&(0x7f0000000400)="a318ece45becd6e47ff8375b816640bce6a9ff4d8e83ab791404d351c3ba957f2f3a0220176c17e899248cf03c8a908c4ade8de15d81e79b803df97f6fe5e817c2efde3befe034a7bc6d52a0c1caf88dc7d2077e40879876ce4aa8d6273615939a97526b8cb409d0c58f8075db16b3aed80bd44efb660be17d9db07a801a43b59a034e9998a5d60b072ab0fe5dd97a86e25568ab6d9d74c7af31171f38eaff0cb0fc24300b48212ec6608ef820fc964a28db4cd8d0526d550efac04bc60f4e4cd3c224cbe0ba45f28e0566930afbca59424e39854793c6e301fdc9cedeb00500db4af5f6c2004568aa4682fc656b1943bb63", 0xf2}, {&(0x7f0000000500)="a1a8c7128cf53b860b5aaa1ccee4ffe846420e0192cfed6fed4eeddf537179b0a83627c3868f3b78b60a61263a90957f69337b2ab0a123218ed0318185135c095b602df299adfc458eef4947ab34782ecdbae8436486efca9d9076f2316af18200a3014efba92706cc1748f7c5925bdaa3aa4516b6f1a6f22f4f1c23d2554620b90ad856298d3087e4519d78d14dd2d1dbb2401cfd330b19ac2fc26b2b4837023a585b846cdc0e007773719a6ad0cf3acb3cde81220e1909741522d75d5c49222f1d0ef6d8bd1b58070df04e658553cb734adbdb98c8482330ed25a1d53b5b3a6c09aa436715f3535c84a943", 0xec}], 0x4) accept(0xffffffffffffffff, 0x0, &(0x7f0000000080)) socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x0, 0x803, 0x0) syz_genetlink_get_family_id$mptcp(&(0x7f0000000000), r1) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r3, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r5, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0xffffffff}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x3c}}, 0x0) 10:42:25 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 
0xffffffffffffffff, 0x0) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) (async) accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) (async) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) (async) socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) 10:42:25 executing program 2: socketpair$nbd(0x1, 0x1, 0x0, &(0x7f0000000000)) (async) socketpair$nbd(0x1, 0x1, 0x0, &(0x7f0000000000)={0xffffffffffffffff}) writev(r0, &(0x7f00000000c0)=[{&(0x7f0000000080)="ae06075345463e841f95d887", 0xc}], 0x1) close(0xffffffffffffffff) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r1, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 10:42:25 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) accept4$ax25(r0, &(0x7f0000000080)={{0x3, @rose}, [@remote, @null, @null, @bcast, @netrom, @netrom, @bcast, @default]}, &(0x7f0000000000)=0x48, 0x80000) (async) r1 = accept4$ax25(r0, &(0x7f0000000080)={{0x3, @rose}, [@remote, @null, @null, @bcast, @netrom, @netrom, @bcast, @default]}, &(0x7f0000000000)=0x48, 0x80000) sendfile(r1, r0, &(0x7f0000000100)=0x7, 0xffff) [ 2209.102966][ T8906] 8021q: adding VLAN 0 to HW filter on device bond1426 [ 2209.213454][ T8913] bridge1286: entered promiscuous mode [ 2209.221336][ T8913] bridge1286: entered allmulticast mode [ 2209.382342][ T8932] 8021q: adding VLAN 0 to HW filter on device bond1463 [ 2209.511077][ T8934] bond1463: (slave bridge1358): making interface the new active one [ 2209.538360][ T8934] bond1463: (slave bridge1358): Enslaving as an active interface with an up link 10:42:25 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xffffffe4}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:42:25 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r0, 0x40305828, &(0x7f0000000000)={0x0, 0x2, 0x40, 0x9}) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 10:42:25 executing program 2: socketpair$nbd(0x1, 0x1, 0x0, &(0x7f0000000000)={0xffffffffffffffff}) writev(r0, &(0x7f00000000c0)=[{&(0x7f0000000080)="ae06075345463e841f95d887", 0xc}], 0x1) (async) close(0xffffffffffffffff) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r1, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 10:42:25 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) 
openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) r0 = accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) ioctl$FS_IOC_MEASURE_VERITY(r0, 0xc0046686, &(0x7f0000000000)={0x2, 0x58, "d4282f82d10a68755951ee4fa6790acae4a9973bb3bcaef5c1e274fc22829f9a84db95419ee072f1221fe732bdaf18a7e295aa4752bb08332d04225298b82a50928353372a62b776321ed829ba9367c9080a6e5f7b1b09ee"}) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r1 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r1, 0x0, 0x0) 10:42:25 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0x10}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) [ 2209.629609][ T8952] 8021q: adding VLAN 0 to HW filter on device bond862 10:42:25 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r0, 0x40305828, &(0x7f0000000000)={0x0, 0x2, 0x40, 0x9}) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 10:42:25 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) openat$cgroup_subtree(0xffffffffffffffff, &(0x7f0000000000), 0x2, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2209.697576][ T8953] bond862: (slave bridge1017): making interface the new active one [ 2209.716141][ T8953] bond862: (slave bridge1017): Enslaving as an active interface with an up link 10:42:25 executing program 5: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) writev(r0, &(0x7f0000000340)=[{&(0x7f0000000280)="043caa07f3a3f76d858ace87d769ded7550278376e8c4f6550d9108cbf5189deea33e10b8a7224c6107d53d01f904f58c8e83e60c4192c5a7fbd1a5f9c9fb63ca8dddee8d195355273926702d073752d80d8545e667b5c485a", 0x59}, {&(0x7f0000000100)="e48c29435b087869336f3e74363df6ea882508c8aa487ff564e800ccd724f50e3896c5d551b1a2ec6bb29187299fe2e06c11dfcb2075f9a839b34a4a6b4c2bec", 0x40}, {&(0x7f0000000400)="a318ece45becd6e47ff8375b816640bce6a9ff4d8e83ab791404d351c3ba957f2f3a0220176c17e899248cf03c8a908c4ade8de15d81e79b803df97f6fe5e817c2efde3befe034a7bc6d52a0c1caf88dc7d2077e40879876ce4aa8d6273615939a97526b8cb409d0c58f8075db16b3aed80bd44efb660be17d9db07a801a43b59a034e9998a5d60b072ab0fe5dd97a86e25568ab6d9d74c7af31171f38eaff0cb0fc24300b48212ec6608ef820fc964a28db4cd8d0526d550efac04bc60f4e4cd3c224cbe0ba45f28e0566930afbca59424e39854793c6e301fdc9cedeb00500db4af5f6c2004568aa4682fc656b1943bb63", 0xf2}, 
{&(0x7f0000000500)="a1a8c7128cf53b860b5aaa1ccee4ffe846420e0192cfed6fed4eeddf537179b0a83627c3868f3b78b60a61263a90957f69337b2ab0a123218ed0318185135c095b602df299adfc458eef4947ab34782ecdbae8436486efca9d9076f2316af18200a3014efba92706cc1748f7c5925bdaa3aa4516b6f1a6f22f4f1c23d2554620b90ad856298d3087e4519d78d14dd2d1dbb2401cfd330b19ac2fc26b2b4837023a585b846cdc0e007773719a6ad0cf3acb3cde81220e1909741522d75d5c49222f1d0ef6d8bd1b58070df04e658553cb734adbdb98c8482330ed25a1d53b5b3a6c09aa436715f3535c84a943", 0xec}], 0x4) accept(0xffffffffffffffff, 0x0, &(0x7f0000000080)) socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x0, 0x803, 0x0) syz_genetlink_get_family_id$mptcp(&(0x7f0000000000), r1) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r3, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r5, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x2, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x3c}}, 0x0) [ 2209.765999][ T8965] 8021q: adding VLAN 0 to HW filter on device bond1427 [ 2209.799454][ T8974] bridge1286: entered promiscuous mode 10:42:25 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) openat$cgroup_subtree(0xffffffffffffffff, &(0x7f0000000000), 0x2, 0x0) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2209.816379][ T8974] bridge1286: entered allmulticast mode 10:42:25 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r0, 0x40305828, &(0x7f0000000000)={0x0, 0x2, 0x40, 0x9}) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 10:42:25 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) r0 = accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) (async) ioctl$FS_IOC_MEASURE_VERITY(r0, 0xc0046686, &(0x7f0000000000)={0x2, 0x58, "d4282f82d10a68755951ee4fa6790acae4a9973bb3bcaef5c1e274fc22829f9a84db95419ee072f1221fe732bdaf18a7e295aa4752bb08332d04225298b82a50928353372a62b776321ed829ba9367c9080a6e5f7b1b09ee"}) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) (async) r1 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) (async) sendmsg$nl_route(r1, 0x0, 0x0) 10:42:25 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) openat$cgroup_subtree(0xffffffffffffffff, &(0x7f0000000000), 0x2, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) openat$cgroup_subtree(0xffffffffffffffff, 
&(0x7f0000000000), 0x2, 0x0) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) [ 2209.956505][ T8974] bond1427: (slave bridge1286): making interface the new active one [ 2210.024861][ T8974] bond1427: (slave bridge1286): Enslaving as an active interface with an up link 10:42:26 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xfffffff0}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:42:26 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) r1 = accept$inet(r0, &(0x7f0000000000)={0x2, 0x0, @loopback}, &(0x7f0000000080)=0x10) setsockopt$IP_VS_SO_SET_ZERO(r1, 0x0, 0x48f, &(0x7f0000000100)={0x3b, @local, 0x4e22, 0x3, 'sh\x00', 0x0, 0x40, 0x4d}, 0x2c) openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r2 = openat$cgroup(r0, &(0x7f00000002c0)='syz1\x00', 0x200002, 0x0) openat$cgroup_ro(r2, &(0x7f0000000300)='blkio.bfq.io_merged\x00', 0x0, 0x0) r3 = openat$cgroup_ro(r2, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r3, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) ioctl$FS_IOC_RESVSP(r3, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f00000013c0)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r3, @ANYBLOB="000000000100000018330000040000000000000000000000450140001000020000000000000000c704ae40194d09bb7e400954e50000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff010009cc18c495f27f207f97802513a4b586b26ea27a294c12639e2903a14e28347ec83be615992ac0f80e8805139c1d9d568faaba343893a1e5b7e8b86cb687287a3304d993369e7366277bb41ca2b0eb98e1883b0deacfb6fdec160b6db36860d2bc2f"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) r4 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000140)='blkio.bfq.io_merged\x00', 0x0, 0x0) recvfrom$inet(r4, &(0x7f00000003c0)=""/4096, 0x1000, 0x1, &(0x7f0000000180)={0x2, 0x4e21, @remote}, 0x10) write$tun(r3, 0x0, 0x0) epoll_ctl$EPOLL_CTL_ADD(0xffffffffffffffff, 0x1, r3, &(0x7f00000000c0)={0x2000}) 10:42:26 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2210.146963][ T8973] 8021q: adding VLAN 0 to HW filter on device bond1464 10:42:26 executing program 2: r0 = 
openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2210.225148][ T8976] bond1464: (slave bridge1359): making interface the new active one [ 2210.238044][ T8976] bond1464: (slave bridge1359): Enslaving as an active interface with an up link 10:42:26 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0x12}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:42:26 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) r1 = accept$inet(r0, &(0x7f0000000000)={0x2, 0x0, @loopback}, &(0x7f0000000080)=0x10) setsockopt$IP_VS_SO_SET_ZERO(r1, 0x0, 0x48f, &(0x7f0000000100)={0x3b, @local, 0x4e22, 0x3, 'sh\x00', 0x0, 0x40, 0x4d}, 0x2c) openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) r2 = openat$cgroup(r0, &(0x7f00000002c0)='syz1\x00', 0x200002, 0x0) openat$cgroup_ro(r2, &(0x7f0000000300)='blkio.bfq.io_merged\x00', 0x0, 0x0) (async) r3 = openat$cgroup_ro(r2, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r3, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) ioctl$FS_IOC_RESVSP(r3, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f00000013c0)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r3, @ANYBLOB="000000000100000018330000040000000000000000000000450140001000020000000000000000c704ae40194d09bb7e400954e50000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff010009cc18c495f27f207f97802513a4b586b26ea27a294c12639e2903a14e28347ec83be615992ac0f80e8805139c1d9d568faaba343893a1e5b7e8b86cb687287a3304d993369e7366277bb41ca2b0eb98e1883b0deacfb6fdec160b6db36860d2bc2f"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) (async) r4 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000140)='blkio.bfq.io_merged\x00', 0x0, 0x0) recvfrom$inet(r4, &(0x7f00000003c0)=""/4096, 0x1000, 0x1, &(0x7f0000000180)={0x2, 0x4e21, @remote}, 0x10) write$tun(r3, 0x0, 0x0) epoll_ctl$EPOLL_CTL_ADD(0xffffffffffffffff, 0x1, r3, &(0x7f00000000c0)={0x2000}) [ 2210.269486][ T8987] validate_nla: 5 callbacks suppressed [ 2210.269517][ T8987] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
10:42:26 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) r0 = accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) (async) ioctl$FS_IOC_MEASURE_VERITY(r0, 0xc0046686, &(0x7f0000000000)={0x2, 0x58, "d4282f82d10a68755951ee4fa6790acae4a9973bb3bcaef5c1e274fc22829f9a84db95419ee072f1221fe732bdaf18a7e295aa4752bb08332d04225298b82a50928353372a62b776321ed829ba9367c9080a6e5f7b1b09ee"}) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r1 = socket$netlink(0x10, 0x3, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r1, 0x0, 0x0) [ 2210.445568][ T8987] 8021q: adding VLAN 0 to HW filter on device bond863 [ 2210.500431][ T9002] netlink: 28 bytes leftover after parsing attributes in process `syz-executor.5'. [ 2210.526965][ T9012] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 10:42:26 executing program 5: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) writev(r0, &(0x7f0000000340)=[{&(0x7f0000000280)="043caa07f3a3f76d858ace87d769ded7550278376e8c4f6550d9108cbf5189deea33e10b8a7224c6107d53d01f904f58c8e83e60c4192c5a7fbd1a5f9c9fb63ca8dddee8d195355273926702d073752d80d8545e667b5c485a", 0x59}, {&(0x7f0000000100)="e48c29435b087869336f3e74363df6ea882508c8aa487ff564e800ccd724f50e3896c5d551b1a2ec6bb29187299fe2e06c11dfcb2075f9a839b34a4a6b4c2bec", 0x40}, {&(0x7f0000000400)="a318ece45becd6e47ff8375b816640bce6a9ff4d8e83ab791404d351c3ba957f2f3a0220176c17e899248cf03c8a908c4ade8de15d81e79b803df97f6fe5e817c2efde3befe034a7bc6d52a0c1caf88dc7d2077e40879876ce4aa8d6273615939a97526b8cb409d0c58f8075db16b3aed80bd44efb660be17d9db07a801a43b59a034e9998a5d60b072ab0fe5dd97a86e25568ab6d9d74c7af31171f38eaff0cb0fc24300b48212ec6608ef820fc964a28db4cd8d0526d550efac04bc60f4e4cd3c224cbe0ba45f28e0566930afbca59424e39854793c6e301fdc9cedeb00500db4af5f6c2004568aa4682fc656b1943bb63", 0xf2}, {&(0x7f0000000500)="a1a8c7128cf53b860b5aaa1ccee4ffe846420e0192cfed6fed4eeddf537179b0a83627c3868f3b78b60a61263a90957f69337b2ab0a123218ed0318185135c095b602df299adfc458eef4947ab34782ecdbae8436486efca9d9076f2316af18200a3014efba92706cc1748f7c5925bdaa3aa4516b6f1a6f22f4f1c23d2554620b90ad856298d3087e4519d78d14dd2d1dbb2401cfd330b19ac2fc26b2b4837023a585b846cdc0e007773719a6ad0cf3acb3cde81220e1909741522d75d5c49222f1d0ef6d8bd1b58070df04e658553cb734adbdb98c8482330ed25a1d53b5b3a6c09aa436715f3535c84a943", 0xec}], 0x4) accept(0xffffffffffffffff, 0x0, &(0x7f0000000080)) socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x0, 0x803, 0x0) syz_genetlink_get_family_id$mptcp(&(0x7f0000000000), r1) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r3, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r5, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) 
sendmsg$nl_route(r2, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x3, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x3c}}, 0x0) 10:42:26 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) 10:42:26 executing program 0: mmap(&(0x7f000034a000/0x3000)=nil, 0x3000, 0xe, 0xc3072, 0xffffffffffffffff, 0xcf917000) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) r0 = accept(0xffffffffffffffff, 0x0, 0x0) ioctl$FITRIM(r0, 0xc0185879, &(0x7f0000000000)={0xffff, 0x14, 0x6}) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r1 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r1, 0x0, 0x0) 10:42:26 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) r1 = accept$inet(r0, &(0x7f0000000000)={0x2, 0x0, @loopback}, &(0x7f0000000080)=0x10) setsockopt$IP_VS_SO_SET_ZERO(r1, 0x0, 0x48f, &(0x7f0000000100)={0x3b, @local, 0x4e22, 0x3, 'sh\x00', 0x0, 0x40, 0x4d}, 0x2c) (async) openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) (async) r2 = openat$cgroup(r0, &(0x7f00000002c0)='syz1\x00', 0x200002, 0x0) openat$cgroup_ro(r2, &(0x7f0000000300)='blkio.bfq.io_merged\x00', 0x0, 0x0) r3 = openat$cgroup_ro(r2, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) ioctl$FS_IOC_RESVSP(r3, 0x40305828, &(0x7f0000001780)={0x2f, 0x0, 0x0, 0x5fffff}) (async) ioctl$FS_IOC_RESVSP(r3, 0x40305829, &(0x7f0000000200)={0xff00, 0x0, 0x0, 0x41, 0x3000000}) bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000240)={0x6, 0xc, &(0x7f00000013c0)=ANY=[@ANYBLOB="182700001fcf05b5c972aff6e1b2c11108f89cc02a767211e1ae64609eac38c6f3adfa9a099b1cd1bf40efe7ad4d0ff2a77425b9f2853396b2316e9787d8c6247e9fe04ecae21ee1d48943d41484b8f30bd7aab2edeb690403", @ANYRES32=r3, @ANYBLOB="000000000100000018330000040000000000000000000000450140001000020000000000000000c704ae40194d09bb7e400954e50000000000000000001800000008000000000000000180000085200000040000008520000002000000ca58f8ff010009cc18c495f27f207f97802513a4b586b26ea27a294c12639e2903a14e28347ec83be615992ac0f80e8805139c1d9d568faaba343893a1e5b7e8b86cb687287a3304d993369e7366277bb41ca2b0eb98e1883b0deacfb6fdec160b6db36860d2bc2f"], 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, &(0x7f00000001c0)={0x4, 0x0, 0x80000001, 0xfffff000}, 0x10}, 0x80) r4 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000140)='blkio.bfq.io_merged\x00', 0x0, 0x0) recvfrom$inet(r4, &(0x7f00000003c0)=""/4096, 0x1000, 0x1, &(0x7f0000000180)={0x2, 0x4e21, @remote}, 0x10) write$tun(r3, 0x0, 0x0) (async) epoll_ctl$EPOLL_CTL_ADD(0xffffffffffffffff, 0x1, r3, &(0x7f00000000c0)={0x2000}) [ 2210.611804][ T9012] 8021q: adding VLAN 0 to HW filter on device bond1428 [ 2210.613535][ T9033] EXT4-fs warning: 19 callbacks suppressed [ 2210.613551][ T9033] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 
2210.626632][ T9019] netlink: 'syz-executor.3': attribute type 1 has an invalid length. 10:42:26 executing program 0: mmap(&(0x7f000034a000/0x3000)=nil, 0x3000, 0xe, 0xc3072, 0xffffffffffffffff, 0xcf917000) (async) mmap(&(0x7f000034a000/0x3000)=nil, 0x3000, 0xe, 0xc3072, 0xffffffffffffffff, 0xcf917000) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) r0 = accept(0xffffffffffffffff, 0x0, 0x0) ioctl$FITRIM(r0, 0xc0185879, &(0x7f0000000000)={0xffff, 0x14, 0x6}) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r1 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r1, 0x0, 0x0) (async) sendmsg$nl_route(r1, 0x0, 0x0) [ 2210.666632][ T9019] 8021q: adding VLAN 0 to HW filter on device bond1465 [ 2210.797700][ T9016] bridge1287: entered promiscuous mode [ 2210.821540][ T9016] bridge1287: entered allmulticast mode [ 2210.938007][ T9016] bond1428: (slave bridge1287): making interface the new active one [ 2210.976415][ T9016] bond1428: (slave bridge1287): Enslaving as an active interface with an up link 10:42:27 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xfffffffe}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:42:27 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='blkio.bfq.time_recursive\x00', 0x275a, 0x0) bind$inet(r0, &(0x7f0000000000)={0x2, 0x4e24, @rand_addr=0x64010102}, 0x10) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 10:42:27 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) pipe(&(0x7f0000000140)={0xffffffffffffffff}) close(r1) ioctl$F2FS_IOC_COMMIT_ATOMIC_WRITE(r1, 0xf502, 0x0) 10:42:27 executing program 0: mmap(&(0x7f000034a000/0x3000)=nil, 0x3000, 0xe, 0xc3072, 0xffffffffffffffff, 0xcf917000) (async) mmap(&(0x7f000034a000/0x3000)=nil, 0x3000, 0xe, 0xc3072, 0xffffffffffffffff, 0xcf917000) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) r0 = accept(0xffffffffffffffff, 0x0, 0x0) ioctl$FITRIM(r0, 0xc0185879, &(0x7f0000000000)={0xffff, 0x14, 0x6}) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$netlink(0x10, 0x3, 0x0) (async) r1 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 
0x0) sendmsg$nl_route(r1, 0x0, 0x0) [ 2211.081078][ T9054] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2211.147502][ T9024] bond1465: (slave bridge1360): making interface the new active one [ 2211.208380][ T9024] bond1465: (slave bridge1360): Enslaving as an active interface with an up link 10:42:27 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0x1a}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:42:27 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='blkio.bfq.time_recursive\x00', 0x275a, 0x0) bind$inet(r0, &(0x7f0000000000)={0x2, 0x4e24, @rand_addr=0x64010102}, 0x10) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) 10:42:27 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) pipe(&(0x7f0000000140)={0xffffffffffffffff}) close(r1) (async) ioctl$F2FS_IOC_COMMIT_ATOMIC_WRITE(r1, 0xf502, 0x0) [ 2211.289781][ T9072] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2211.297366][ T9068] bridge1288: entered promiscuous mode [ 2211.335938][ T9068] bridge1288: entered allmulticast mode [ 2211.395803][ T9049] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
10:42:27 executing program 5: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) writev(r0, &(0x7f0000000340)=[{&(0x7f0000000280)="043caa07f3a3f76d858ace87d769ded7550278376e8c4f6550d9108cbf5189deea33e10b8a7224c6107d53d01f904f58c8e83e60c4192c5a7fbd1a5f9c9fb63ca8dddee8d195355273926702d073752d80d8545e667b5c485a", 0x59}, {&(0x7f0000000100)="e48c29435b087869336f3e74363df6ea882508c8aa487ff564e800ccd724f50e3896c5d551b1a2ec6bb29187299fe2e06c11dfcb2075f9a839b34a4a6b4c2bec", 0x40}, {&(0x7f0000000400)="a318ece45becd6e47ff8375b816640bce6a9ff4d8e83ab791404d351c3ba957f2f3a0220176c17e899248cf03c8a908c4ade8de15d81e79b803df97f6fe5e817c2efde3befe034a7bc6d52a0c1caf88dc7d2077e40879876ce4aa8d6273615939a97526b8cb409d0c58f8075db16b3aed80bd44efb660be17d9db07a801a43b59a034e9998a5d60b072ab0fe5dd97a86e25568ab6d9d74c7af31171f38eaff0cb0fc24300b48212ec6608ef820fc964a28db4cd8d0526d550efac04bc60f4e4cd3c224cbe0ba45f28e0566930afbca59424e39854793c6e301fdc9cedeb00500db4af5f6c2004568aa4682fc656b1943bb63", 0xf2}, {&(0x7f0000000500)="a1a8c7128cf53b860b5aaa1ccee4ffe846420e0192cfed6fed4eeddf537179b0a83627c3868f3b78b60a61263a90957f69337b2ab0a123218ed0318185135c095b602df299adfc458eef4947ab34782ecdbae8436486efca9d9076f2316af18200a3014efba92706cc1748f7c5925bdaa3aa4516b6f1a6f22f4f1c23d2554620b90ad856298d3087e4519d78d14dd2d1dbb2401cfd330b19ac2fc26b2b4837023a585b846cdc0e007773719a6ad0cf3acb3cde81220e1909741522d75d5c49222f1d0ef6d8bd1b58070df04e658553cb734adbdb98c8482330ed25a1d53b5b3a6c09aa436715f3535c84a943", 0xec}], 0x4) accept(0xffffffffffffffff, 0x0, &(0x7f0000000080)) socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x0, 0x803, 0x0) syz_genetlink_get_family_id$mptcp(&(0x7f0000000000), r1) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r3, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r5, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x4, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x3c}}, 0x0) 10:42:27 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r0 = socket$netlink(0x10, 0x3, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r0, 0x0, 0x0) r1 = bpf$OBJ_GET_MAP(0x7, &(0x7f0000000040)={&(0x7f0000000000)='./file0\x00', 0x0, 0x10}, 0x10) preadv(r1, &(0x7f0000000300)=[{&(0x7f0000000080)=""/136, 0x88}, {&(0x7f0000000180)=""/211, 0xd3}, {&(0x7f0000000400)=""/4096, 0x1000}, {&(0x7f0000000280)=""/84, 0x54}], 0x4, 0x2, 0x8000) 10:42:27 executing 
program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) pipe(&(0x7f0000000140)={0xffffffffffffffff}) close(r1) ioctl$F2FS_IOC_COMMIT_ATOMIC_WRITE(r1, 0xf502, 0x0) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) pipe(&(0x7f0000000140)) (async) close(r1) (async) ioctl$F2FS_IOC_COMMIT_ATOMIC_WRITE(r1, 0xf502, 0x0) (async) 10:42:27 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='blkio.bfq.time_recursive\x00', 0x275a, 0x0) bind$inet(r0, &(0x7f0000000000)={0x2, 0x4e24, @rand_addr=0x64010102}, 0x10) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2211.468049][ T9049] 8021q: adding VLAN 0 to HW filter on device bond864 [ 2211.477023][ T9042] netlink: 28 bytes leftover after parsing attributes in process `syz-executor.5'. [ 2211.505078][ T9067] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 2211.558298][ T9087] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further 10:42:27 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000000)='io.stat\x00', 0x0, 0x0) r1 = bpf$ITER_CREATE(0x21, &(0x7f0000000080)={r0}, 0x8) r2 = openat$cgroup_ro(r1, &(0x7f0000000040)='blkio.bfq.io_merged\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r2, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2211.629362][ T9092] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2211.666423][ T9067] 8021q: adding VLAN 0 to HW filter on device bond1429 [ 2211.676402][ T9075] netlink: 'syz-executor.3': attribute type 1 has an invalid length. 
10:42:27 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0xffffffff}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:42:27 executing program 4: getsockopt$inet_sctp_SCTP_PEER_AUTH_CHUNKS(0xffffffffffffffff, 0x84, 0x1a, &(0x7f0000000180)={0x0, 0x44, "591d93c1cf8a75e490d868b2c1b82b9bb996addcf53b653f76d6c35c1a0ca8f5bf9d2ebd5a45e867ed42cabb74221d0fc9842854f04b096e341b2bacccedaf46f65e0d36"}, &(0x7f0000000200)=0x4c) r1 = socket$inet_sctp(0x2, 0x1, 0x84) getsockopt$inet_sctp_SCTP_MAX_BURST(r1, 0x84, 0xd, &(0x7f0000000000)=@assoc_value, &(0x7f00000000c0)=0x8) setsockopt$inet_sctp_SCTP_MAXSEG(r1, 0x84, 0xd, &(0x7f0000000240)=@assoc_value={r0, 0x3}, 0x8) ioctl$sock_SIOCGIFINDEX(0xffffffffffffffff, 0x8933, &(0x7f0000000000)={'bridge_slave_0\x00'}) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r2, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) r3 = syz_genetlink_get_family_id$ipvs(&(0x7f0000000380), 0xffffffffffffffff) r4 = socket$nl_generic(0x10, 0x3, 0x10) sendmsg$IPVS_CMD_NEW_DAEMON(r4, &(0x7f00000002c0)={0x0, 0x0, &(0x7f0000000280)={&(0x7f0000000080)={0x3c, r3, 0x1, 0x0, 0x0, {}, [@IPVS_CMD_ATTR_DAEMON={0x28, 0x3, 0x0, 0x1, [@IPVS_DAEMON_ATTR_STATE={0x8, 0x1, 0x2}, @IPVS_DAEMON_ATTR_MCAST_IFN={0x14, 0x2, 'vcan0\x00'}, @IPVS_DAEMON_ATTR_SYNC_ID={0x8}]}]}, 0x3c}}, 0x0) sendmsg$IPVS_CMD_DEL_DEST(r2, &(0x7f0000000140)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x200000}, 0xc, &(0x7f0000000100)={&(0x7f00000000c0)={0x24, r3, 0x314, 0x70bd29, 0x25dfdbfd, {}, [@IPVS_CMD_ATTR_TIMEOUT_TCP_FIN={0x8, 0x5, 0x7f}, @IPVS_CMD_ATTR_TIMEOUT_TCP_FIN={0x8, 0x5, 0x8001}]}, 0x24}, 0x1, 0x0, 0x0, 0x20008114}, 0x40) 10:42:27 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000000)='io.stat\x00', 0x0, 0x0) r1 = bpf$ITER_CREATE(0x21, &(0x7f0000000080)={r0}, 0x8) r2 = openat$cgroup_ro(r1, &(0x7f0000000040)='blkio.bfq.io_merged\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r2, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2211.785399][ T9075] 8021q: adding VLAN 0 to HW filter on device bond1466 10:42:27 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000000)='io.stat\x00', 0x0, 0x0) r1 = bpf$ITER_CREATE(0x21, &(0x7f0000000080)={r0}, 0x8) r2 = openat$cgroup_ro(r1, &(0x7f0000000040)='blkio.bfq.io_merged\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r2, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) [ 2211.859794][ T9104] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2211.950308][ T9077] bond1466: (slave bridge1361): making interface the new active one [ 2211.991603][ T9077] bond1466: (slave bridge1361): Enslaving as an active interface with an up link [ 2212.015963][ T9097] netlink: 'syz-executor.5': attribute 
type 1 has an invalid length. 10:42:28 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0x24}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:42:28 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) (async) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) (async) accept(0xffffffffffffffff, 0x0, 0x0) (async) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) (async) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) (async, rerun: 32) r0 = socket$netlink(0x10, 0x3, 0x0) (rerun: 32) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) (async) sendmsg$nl_route(r0, 0x0, 0x0) (async) r1 = bpf$OBJ_GET_MAP(0x7, &(0x7f0000000040)={&(0x7f0000000000)='./file0\x00', 0x0, 0x10}, 0x10) preadv(r1, &(0x7f0000000300)=[{&(0x7f0000000080)=""/136, 0x88}, {&(0x7f0000000180)=""/211, 0xd3}, {&(0x7f0000000400)=""/4096, 0x1000}, {&(0x7f0000000280)=""/84, 0x54}], 0x4, 0x2, 0x8000) 10:42:28 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) r1 = bpf$BPF_RAW_TRACEPOINT_OPEN_UNNAMED(0x11, &(0x7f0000000000)={0x0, r0}, 0x10) mmap(&(0x7f0000ffe000/0x2000)=nil, 0x2000, 0x0, 0x11, r1, 0x7c831000) [ 2212.100937][ T9117] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2212.159832][ T9097] 8021q: adding VLAN 0 to HW filter on device bond865 [ 2212.210133][ T9109] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
10:42:28 executing program 5: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) writev(r0, &(0x7f0000000340)=[{&(0x7f0000000280)="043caa07f3a3f76d858ace87d769ded7550278376e8c4f6550d9108cbf5189deea33e10b8a7224c6107d53d01f904f58c8e83e60c4192c5a7fbd1a5f9c9fb63ca8dddee8d195355273926702d073752d80d8545e667b5c485a", 0x59}, {&(0x7f0000000100)="e48c29435b087869336f3e74363df6ea882508c8aa487ff564e800ccd724f50e3896c5d551b1a2ec6bb29187299fe2e06c11dfcb2075f9a839b34a4a6b4c2bec", 0x40}, {&(0x7f0000000400)="a318ece45becd6e47ff8375b816640bce6a9ff4d8e83ab791404d351c3ba957f2f3a0220176c17e899248cf03c8a908c4ade8de15d81e79b803df97f6fe5e817c2efde3befe034a7bc6d52a0c1caf88dc7d2077e40879876ce4aa8d6273615939a97526b8cb409d0c58f8075db16b3aed80bd44efb660be17d9db07a801a43b59a034e9998a5d60b072ab0fe5dd97a86e25568ab6d9d74c7af31171f38eaff0cb0fc24300b48212ec6608ef820fc964a28db4cd8d0526d550efac04bc60f4e4cd3c224cbe0ba45f28e0566930afbca59424e39854793c6e301fdc9cedeb00500db4af5f6c2004568aa4682fc656b1943bb63", 0xf2}, {&(0x7f0000000500)="a1a8c7128cf53b860b5aaa1ccee4ffe846420e0192cfed6fed4eeddf537179b0a83627c3868f3b78b60a61263a90957f69337b2ab0a123218ed0318185135c095b602df299adfc458eef4947ab34782ecdbae8436486efca9d9076f2316af18200a3014efba92706cc1748f7c5925bdaa3aa4516b6f1a6f22f4f1c23d2554620b90ad856298d3087e4519d78d14dd2d1dbb2401cfd330b19ac2fc26b2b4837023a585b846cdc0e007773719a6ad0cf3acb3cde81220e1909741522d75d5c49222f1d0ef6d8bd1b58070df04e658553cb734adbdb98c8482330ed25a1d53b5b3a6c09aa436715f3535c84a943", 0xec}], 0x4) accept(0xffffffffffffffff, 0x0, &(0x7f0000000080)) socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x0, 0x803, 0x0) syz_genetlink_get_family_id$mptcp(&(0x7f0000000000), r1) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r3, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r5, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x5, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x3c}}, 0x0) 10:42:28 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) (async) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) (async) r0 = socket$netlink(0x10, 0x3, 0x0) (async) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r0, 0x0, 0x0) r1 = bpf$OBJ_GET_MAP(0x7, &(0x7f0000000040)={&(0x7f0000000000)='./file0\x00', 0x0, 0x10}, 0x10) preadv(r1, &(0x7f0000000300)=[{&(0x7f0000000080)=""/136, 0x88}, {&(0x7f0000000180)=""/211, 0xd3}, {&(0x7f0000000400)=""/4096, 0x1000}, {&(0x7f0000000280)=""/84, 0x54}], 0x4, 0x2, 
0x8000) 10:42:28 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) r1 = bpf$BPF_RAW_TRACEPOINT_OPEN_UNNAMED(0x11, &(0x7f0000000000)={0x0, r0}, 0x10) mmap(&(0x7f0000ffe000/0x2000)=nil, 0x2000, 0x0, 0x11, r1, 0x7c831000) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) bpf$BPF_RAW_TRACEPOINT_OPEN_UNNAMED(0x11, &(0x7f0000000000)={0x0, r0}, 0x10) (async) mmap(&(0x7f0000ffe000/0x2000)=nil, 0x2000, 0x0, 0x11, r1, 0x7c831000) (async) 10:42:28 executing program 4: getsockopt$inet_sctp_SCTP_PEER_AUTH_CHUNKS(0xffffffffffffffff, 0x84, 0x1a, &(0x7f0000000180)={0x0, 0x44, "591d93c1cf8a75e490d868b2c1b82b9bb996addcf53b653f76d6c35c1a0ca8f5bf9d2ebd5a45e867ed42cabb74221d0fc9842854f04b096e341b2bacccedaf46f65e0d36"}, &(0x7f0000000200)=0x4c) (async) r1 = socket$inet_sctp(0x2, 0x1, 0x84) getsockopt$inet_sctp_SCTP_MAX_BURST(r1, 0x84, 0xd, &(0x7f0000000000)=@assoc_value, &(0x7f00000000c0)=0x8) (async) setsockopt$inet_sctp_SCTP_MAXSEG(r1, 0x84, 0xd, &(0x7f0000000240)=@assoc_value={r0, 0x3}, 0x8) (async) ioctl$sock_SIOCGIFINDEX(0xffffffffffffffff, 0x8933, &(0x7f0000000000)={'bridge_slave_0\x00'}) (async) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r2, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) r3 = syz_genetlink_get_family_id$ipvs(&(0x7f0000000380), 0xffffffffffffffff) (async, rerun: 32) r4 = socket$nl_generic(0x10, 0x3, 0x10) (rerun: 32) sendmsg$IPVS_CMD_NEW_DAEMON(r4, &(0x7f00000002c0)={0x0, 0x0, &(0x7f0000000280)={&(0x7f0000000080)={0x3c, r3, 0x1, 0x0, 0x0, {}, [@IPVS_CMD_ATTR_DAEMON={0x28, 0x3, 0x0, 0x1, [@IPVS_DAEMON_ATTR_STATE={0x8, 0x1, 0x2}, @IPVS_DAEMON_ATTR_MCAST_IFN={0x14, 0x2, 'vcan0\x00'}, @IPVS_DAEMON_ATTR_SYNC_ID={0x8}]}]}, 0x3c}}, 0x0) sendmsg$IPVS_CMD_DEL_DEST(r2, &(0x7f0000000140)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x200000}, 0xc, &(0x7f0000000100)={&(0x7f00000000c0)={0x24, r3, 0x314, 0x70bd29, 0x25dfdbfd, {}, [@IPVS_CMD_ATTR_TIMEOUT_TCP_FIN={0x8, 0x5, 0x7f}, @IPVS_CMD_ATTR_TIMEOUT_TCP_FIN={0x8, 0x5, 0x8001}]}, 0x24}, 0x1, 0x0, 0x0, 0x20008114}, 0x40) [ 2212.262117][ T9129] IPVS: sync thread started: state = BACKUP, mcast_ifn = vcan0, syncid = 0, id = 0 [ 2212.303688][ T9132] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2212.419146][ T9109] 8021q: adding VLAN 0 to HW filter on device bond1430 10:42:28 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) r1 = bpf$BPF_RAW_TRACEPOINT_OPEN_UNNAMED(0x11, &(0x7f0000000000)={0x0, r0}, 0x10) mmap(&(0x7f0000ffe000/0x2000)=nil, 0x2000, 0x0, 0x11, r1, 0x7c831000) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) (async) bpf$BPF_RAW_TRACEPOINT_OPEN_UNNAMED(0x11, &(0x7f0000000000)={0x0, r0}, 0x10) (async) mmap(&(0x7f0000ffe000/0x2000)=nil, 0x2000, 0x0, 0x11, r1, 0x7c831000) (async) [ 2212.469084][ T9149] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2212.625713][ T9112] bridge1288: entered promiscuous mode [ 
2212.646192][ T9112] bridge1288: entered allmulticast mode 10:42:28 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:42:28 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$binfmt_misc(r0, &(0x7f0000000080)={'syz0', "d473d9161fe11707d133f20ebd3b2a50ce7e3b3666cf7ab9596e9d4045cde462922b84f95bb3d05c19a40f9b5ba32d914e902c2ff004fc2d0aaffddaefb10679b2461054c720153e136b4fd8308bebcc72e9366ebbea43a0712c54385ff9ffb18acaccb48ae5a9be26fc8f63e2e6581a471e62d96bc963cf832cfafff233c1c89012234f481007cd38aaab0b7faa50465f30de9473039eda72f0d9d289c94c413b622e63be014b"}, 0xab) r1 = openat$cgroup_ro(r0, &(0x7f0000000000)='memory.events.local\x00', 0x0, 0x0) openat$cgroup_ro(r1, &(0x7f0000000140)='devices.list\x00', 0x0, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0x3) 10:42:28 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) r0 = accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r1 = socket$netlink(0x10, 0x3, 0x0) ioctl$F2FS_IOC_MOVE_RANGE(0xffffffffffffffff, 0xc020f509, &(0x7f0000000000)={r0, 0x0, 0x7f, 0xffffffffffffffff}) r3 = syz_genetlink_get_family_id$mptcp(&(0x7f0000000080), r0) sendmsg$MPTCP_PM_CMD_GET_ADDR(r2, &(0x7f0000000180)={&(0x7f0000000040)={0x10, 0x0, 0x0, 0x2002042}, 0xc, &(0x7f0000000100)={&(0x7f00000001c0)=ANY=[@ANYBLOB="ad3aeeb65bd2bdc461faeba163ee6486f71d449826f97129dae7ad998be4894a166ffa6ed5773d377a21730002d4658692c36cf397f984ffcd2ced2be51ad4b25ccc3501ce18a16c84f06ec1faa182000000002981a578aaeb1e789afcdc7cfbe46e95bae6f976c87f16537011544b80b0441f1e6aa8b0d4890d20d3997117cc2d8155916434e6022c85946055c6860bf76b7c159c66e6ffba37a43319ce3560a5b707c0e6a9b9e533e9f1fac2c8e7155ca249262c1c6b2006e5b1a733b53bfd9025fac12a0587bc17e52c97f1e7b95b2eaae0523f79c936db553cc8e0357f8757e7d785585f5d2e8b955a3f000000000000000000001d19e18dbc20972f3d0272e08bf3d6673123669967b474a2", @ANYRES16=r3, @ANYBLOB="080026bd7000fddbdf2503000000080003000700000008000200010000000800020001000000"], 0x2c}, 0x1, 0x0, 0x0, 0x20048000}, 0x4010000) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r1, 0x0, 0x0) 10:42:28 executing program 4: getsockopt$inet_sctp_SCTP_PEER_AUTH_CHUNKS(0xffffffffffffffff, 0x84, 0x1a, &(0x7f0000000180)={0x0, 0x44, "591d93c1cf8a75e490d868b2c1b82b9bb996addcf53b653f76d6c35c1a0ca8f5bf9d2ebd5a45e867ed42cabb74221d0fc9842854f04b096e341b2bacccedaf46f65e0d36"}, &(0x7f0000000200)=0x4c) r1 = socket$inet_sctp(0x2, 0x1, 0x84) getsockopt$inet_sctp_SCTP_MAX_BURST(r1, 0x84, 0xd, 
&(0x7f0000000000)=@assoc_value, &(0x7f00000000c0)=0x8) (async) setsockopt$inet_sctp_SCTP_MAXSEG(r1, 0x84, 0xd, &(0x7f0000000240)=@assoc_value={r0, 0x3}, 0x8) (async) ioctl$sock_SIOCGIFINDEX(0xffffffffffffffff, 0x8933, &(0x7f0000000000)={'bridge_slave_0\x00'}) (async) r2 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r2, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) r3 = syz_genetlink_get_family_id$ipvs(&(0x7f0000000380), 0xffffffffffffffff) r4 = socket$nl_generic(0x10, 0x3, 0x10) sendmsg$IPVS_CMD_NEW_DAEMON(r4, &(0x7f00000002c0)={0x0, 0x0, &(0x7f0000000280)={&(0x7f0000000080)={0x3c, r3, 0x1, 0x0, 0x0, {}, [@IPVS_CMD_ATTR_DAEMON={0x28, 0x3, 0x0, 0x1, [@IPVS_DAEMON_ATTR_STATE={0x8, 0x1, 0x2}, @IPVS_DAEMON_ATTR_MCAST_IFN={0x14, 0x2, 'vcan0\x00'}, @IPVS_DAEMON_ATTR_SYNC_ID={0x8}]}]}, 0x3c}}, 0x0) (async) sendmsg$IPVS_CMD_DEL_DEST(r2, &(0x7f0000000140)={&(0x7f0000000080)={0x10, 0x0, 0x0, 0x200000}, 0xc, &(0x7f0000000100)={&(0x7f00000000c0)={0x24, r3, 0x314, 0x70bd29, 0x25dfdbfd, {}, [@IPVS_CMD_ATTR_TIMEOUT_TCP_FIN={0x8, 0x5, 0x7f}, @IPVS_CMD_ATTR_TIMEOUT_TCP_FIN={0x8, 0x5, 0x8001}]}, 0x24}, 0x1, 0x0, 0x0, 0x20008114}, 0x40) [ 2212.765811][ T9126] netlink: 'syz-executor.3': attribute type 1 has an invalid length. [ 2212.835403][ T9126] 8021q: adding VLAN 0 to HW filter on device bond1467 [ 2212.866879][ T9168] EXT4-fs warning (device sda1): ext4_group_extend:1869: need to use ext2online to resize further [ 2213.016822][ T9130] bond1467: (slave bridge1362): making interface the new active one [ 2213.048847][ T9130] bond1467: (slave bridge1362): Enslaving as an active interface with an up link 10:42:29 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0x3c}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:42:29 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$binfmt_misc(r0, &(0x7f0000000080)={'syz0', "d473d9161fe11707d133f20ebd3b2a50ce7e3b3666cf7ab9596e9d4045cde462922b84f95bb3d05c19a40f9b5ba32d914e902c2ff004fc2d0aaffddaefb10679b2461054c720153e136b4fd8308bebcc72e9366ebbea43a0712c54385ff9ffb18acaccb48ae5a9be26fc8f63e2e6581a471e62d96bc963cf832cfafff233c1c89012234f481007cd38aaab0b7faa50465f30de9473039eda72f0d9d289c94c413b622e63be014b"}, 0xab) r1 = openat$cgroup_ro(r0, &(0x7f0000000000)='memory.events.local\x00', 0x0, 0x0) openat$cgroup_ro(r1, &(0x7f0000000140)='devices.list\x00', 0x0, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0x3) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) write$binfmt_misc(r0, &(0x7f0000000080)={'syz0', 
"d473d9161fe11707d133f20ebd3b2a50ce7e3b3666cf7ab9596e9d4045cde462922b84f95bb3d05c19a40f9b5ba32d914e902c2ff004fc2d0aaffddaefb10679b2461054c720153e136b4fd8308bebcc72e9366ebbea43a0712c54385ff9ffb18acaccb48ae5a9be26fc8f63e2e6581a471e62d96bc963cf832cfafff233c1c89012234f481007cd38aaab0b7faa50465f30de9473039eda72f0d9d289c94c413b622e63be014b"}, 0xab) (async) openat$cgroup_ro(r0, &(0x7f0000000000)='memory.events.local\x00', 0x0, 0x0) (async) openat$cgroup_ro(r1, &(0x7f0000000140)='devices.list\x00', 0x0, 0x0) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0x3) (async) 10:42:29 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) (async) r0 = accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) (async) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r1 = socket$netlink(0x10, 0x3, 0x0) (async) ioctl$F2FS_IOC_MOVE_RANGE(0xffffffffffffffff, 0xc020f509, &(0x7f0000000000)={r0, 0x0, 0x7f, 0xffffffffffffffff}) r3 = syz_genetlink_get_family_id$mptcp(&(0x7f0000000080), r0) sendmsg$MPTCP_PM_CMD_GET_ADDR(r2, &(0x7f0000000180)={&(0x7f0000000040)={0x10, 0x0, 0x0, 0x2002042}, 0xc, &(0x7f0000000100)={&(0x7f00000001c0)=ANY=[@ANYBLOB="ad3aeeb65bd2bdc461faeba163ee6486f71d449826f97129dae7ad998be4894a166ffa6ed5773d377a21730002d4658692c36cf397f984ffcd2ced2be51ad4b25ccc3501ce18a16c84f06ec1faa182000000002981a578aaeb1e789afcdc7cfbe46e95bae6f976c87f16537011544b80b0441f1e6aa8b0d4890d20d3997117cc2d8155916434e6022c85946055c6860bf76b7c159c66e6ffba37a43319ce3560a5b707c0e6a9b9e533e9f1fac2c8e7155ca249262c1c6b2006e5b1a733b53bfd9025fac12a0587bc17e52c97f1e7b95b2eaae0523f79c936db553cc8e0357f8757e7d785585f5d2e8b955a3f000000000000000000001d19e18dbc20972f3d0272e08bf3d6673123669967b474a2", @ANYRES16=r3, @ANYBLOB="080026bd7000fddbdf2503000000080003000700000008000200010000000800020001000000"], 0x2c}, 0x1, 0x0, 0x0, 0x20048000}, 0x4010000) (async) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) (async) sendmsg$nl_route(r1, 0x0, 0x0) [ 2213.068930][ T9155] netlink: 'syz-executor.5': attribute type 1 has an invalid length. 
10:42:29 executing program 5: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) r0 = openat$cgroup_root(0xffffffffffffff9c, &(0x7f0000000000), 0x200002, 0x0) openat$cgroup_ro(r0, &(0x7f0000000040)='freezer.state\x00', 0x275a, 0x0) writev(r0, &(0x7f0000000340)=[{&(0x7f0000000280)="043caa07f3a3f76d858ace87d769ded7550278376e8c4f6550d9108cbf5189deea33e10b8a7224c6107d53d01f904f58c8e83e60c4192c5a7fbd1a5f9c9fb63ca8dddee8d195355273926702d073752d80d8545e667b5c485a", 0x59}, {&(0x7f0000000100)="e48c29435b087869336f3e74363df6ea882508c8aa487ff564e800ccd724f50e3896c5d551b1a2ec6bb29187299fe2e06c11dfcb2075f9a839b34a4a6b4c2bec", 0x40}, {&(0x7f0000000400)="a318ece45becd6e47ff8375b816640bce6a9ff4d8e83ab791404d351c3ba957f2f3a0220176c17e899248cf03c8a908c4ade8de15d81e79b803df97f6fe5e817c2efde3befe034a7bc6d52a0c1caf88dc7d2077e40879876ce4aa8d6273615939a97526b8cb409d0c58f8075db16b3aed80bd44efb660be17d9db07a801a43b59a034e9998a5d60b072ab0fe5dd97a86e25568ab6d9d74c7af31171f38eaff0cb0fc24300b48212ec6608ef820fc964a28db4cd8d0526d550efac04bc60f4e4cd3c224cbe0ba45f28e0566930afbca59424e39854793c6e301fdc9cedeb00500db4af5f6c2004568aa4682fc656b1943bb63", 0xf2}, {&(0x7f0000000500)="a1a8c7128cf53b860b5aaa1ccee4ffe846420e0192cfed6fed4eeddf537179b0a83627c3868f3b78b60a61263a90957f69337b2ab0a123218ed0318185135c095b602df299adfc458eef4947ab34782ecdbae8436486efca9d9076f2316af18200a3014efba92706cc1748f7c5925bdaa3aa4516b6f1a6f22f4f1c23d2554620b90ad856298d3087e4519d78d14dd2d1dbb2401cfd330b19ac2fc26b2b4837023a585b846cdc0e007773719a6ad0cf3acb3cde81220e1909741522d75d5c49222f1d0ef6d8bd1b58070df04e658553cb734adbdb98c8482330ed25a1d53b5b3a6c09aa436715f3535c84a943", 0xec}], 0x4) accept(0xffffffffffffffff, 0x0, &(0x7f0000000080)) socket$nl_route(0x10, 0x3, 0x0) r1 = socket(0x0, 0x803, 0x0) syz_genetlink_get_family_id$mptcp(&(0x7f0000000000), r1) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) socket$nl_generic(0x10, 0x3, 0x10) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r3, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r5, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x6, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x3c}}, 0x0) 10:42:29 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) writev(r0, &(0x7f0000000280)=[{&(0x7f0000000080)="218f2637db39c0d8ff8e3e2d1823ce173d7a56d78f715990e3267e785cc758f3d14250b8fd5bf3a2715392ab47e5f51aeb597d522e167fedd038b3bc43373dac615f1a0fde144d66489c286c5682291a25f7b88c4c3a4f3490e8bffdaf1ac9f1306b8b7101f4220bd27b5aa388732dd638c5168d5be7a0971847d94ddba03ab08b408bc30ce7ba1075345e3d40b32f60d495e1a6be13410a8539adaf18d9b25f81f024448fda2fdb73eb0784cf7f785efc956873760664057c92baca8ea8b14ac865ae2e75b7044361a416ca7e2b02fc70809974cffcaba0d9f8fb03", 0xdc}, 
{&(0x7f0000000180)="923883a4503b25156bd9aa290182ec83e9de9105aefd3066618d75c32c4266d0663c5b259554f949dd5ff9ab5d30f3842f5f0c6c0be93f957a8eff7228b22e0f430c390647ab14af1c797b079a42b0377e23f5078c7f9092e5d18e67b61a46b99cca876623d88735b2164faac31f3e382e75fb963775f17c34c5ae08b78d1323f9ee4522c16723cbf15182d810ff903f5b4cfdf38bf6cb7db34874060ffc828a3d085c69fb11f62a64c653f2376b6551ae1fb3cb14fb22cbd1943ddffdbddb9b2b2473323c64dac59f02ad3c2f4e", 0xce}], 0x2) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000000000)=0xfffffffffffffc03) 10:42:29 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) write$binfmt_misc(r0, &(0x7f0000000080)={'syz0', "d473d9161fe11707d133f20ebd3b2a50ce7e3b3666cf7ab9596e9d4045cde462922b84f95bb3d05c19a40f9b5ba32d914e902c2ff004fc2d0aaffddaefb10679b2461054c720153e136b4fd8308bebcc72e9366ebbea43a0712c54385ff9ffb18acaccb48ae5a9be26fc8f63e2e6581a471e62d96bc963cf832cfafff233c1c89012234f481007cd38aaab0b7faa50465f30de9473039eda72f0d9d289c94c413b622e63be014b"}, 0xab) (async) r1 = openat$cgroup_ro(r0, &(0x7f0000000000)='memory.events.local\x00', 0x0, 0x0) openat$cgroup_ro(r1, &(0x7f0000000140)='devices.list\x00', 0x0, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0x3) 10:42:29 executing program 0: mmap(&(0x7f0000000000/0x400000)=nil, 0x400000, 0xe, 0xc3072, 0xffffffffffffffff, 0x0) (async) openat$cgroup_ro(0xffffffffffffffff, 0x0, 0x275a, 0x0) (async) r0 = accept(0xffffffffffffffff, 0x0, 0x0) syz_genetlink_get_family_id$mptcp(0x0, 0xffffffffffffffff) (async) syz_genetlink_get_family_id$nl80211(0x0, 0xffffffffffffffff) r1 = socket$netlink(0x10, 0x3, 0x0) (async) ioctl$F2FS_IOC_MOVE_RANGE(0xffffffffffffffff, 0xc020f509, &(0x7f0000000000)={r0, 0x0, 0x7f, 0xffffffffffffffff}) (async) r3 = syz_genetlink_get_family_id$mptcp(&(0x7f0000000080), r0) sendmsg$MPTCP_PM_CMD_GET_ADDR(r2, &(0x7f0000000180)={&(0x7f0000000040)={0x10, 0x0, 0x0, 0x2002042}, 0xc, &(0x7f0000000100)={&(0x7f00000001c0)=ANY=[@ANYBLOB="ad3aeeb65bd2bdc461faeba163ee6486f71d449826f97129dae7ad998be4894a166ffa6ed5773d377a21730002d4658692c36cf397f984ffcd2ced2be51ad4b25ccc3501ce18a16c84f06ec1faa182000000002981a578aaeb1e789afcdc7cfbe46e95bae6f976c87f16537011544b80b0441f1e6aa8b0d4890d20d3997117cc2d8155916434e6022c85946055c6860bf76b7c159c66e6ffba37a43319ce3560a5b707c0e6a9b9e533e9f1fac2c8e7155ca249262c1c6b2006e5b1a733b53bfd9025fac12a0587bc17e52c97f1e7b95b2eaae0523f79c936db553cc8e0357f8757e7d785585f5d2e8b955a3f000000000000000000001d19e18dbc20972f3d0272e08bf3d6673123669967b474a2", @ANYRES16=r3, @ANYBLOB="080026bd7000fddbdf2503000000080003000700000008000200010000000800020001000000"], 0x2c}, 0x1, 0x0, 0x0, 0x20048000}, 0x4010000) (async) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={0x0, 0x3c}}, 0x0) (async) sendmsg$nl_route(r1, 0x0, 0x0) [ 2213.189735][ T9155] 8021q: adding VLAN 0 to HW filter on device bond866 10:42:29 executing program 2: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) r1 = socket$inet_dccp(0x2, 0x6, 0x0) ioctl$FIDEDUPERANGE(r1, 0xc0189436, &(0x7f0000000080)={0x4, 0x931, 0x1, 0x0, 0x0, [{{r0}, 0x1}]}) ioctl$AUTOFS_IOC_EXPIRE_MULTI(r0, 0x40049366, &(0x7f0000000000)=0x1) r2 = bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000080)={0x6, 0x4, 
&(0x7f0000001540)=ANY=[@ANYBLOB="18020000000000000000000000000000850000007b00000095000000000000003a64d725989c4db16f3baf71e8e1e0e0fda53265e4986a8c0c5df4890b46f7846cb7e88700f70c9f5bf25e11ea397ef5a10816d06d8cf70fb1ce0f722145d23429d9e7cfb38f06d6534257aa696d61d62cf6a1a78b21361809b6a0395915bdb64fb487cb0fad8a71b0bbd7cd3d179cd3a2adb6b26e449c1d44b4b691a0aa23604699a34918c7d93cf7f4f3bdb02390cab447693161cfe5ce5e07eae50ceb073b257290594a250afbbfdd2d0fedeca5db427aaf183a78f82a56aa9868077e2bb77d396224335038f286ca71f5d1b5dc035167eb7c76d84b5a07fea66f1738894a4ea1ccc9e9049ef948b9e97de4f14e721fa1ba5af84ad4"], &(0x7f0000000100)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) bpf$BPF_PROG_TEST_RUN(0xa, &(0x7f0000000000)={r2, 0xf000, 0x0, 0x41, 0x0, 0x0, 0x41, 0x0, 0x0, 0x0, 0x0, 0x0}, 0x48) ioctl$F2FS_IOC_MOVE_RANGE(r2, 0xc020f509, &(0x7f0000000140)={r2}) write$binfmt_script(r2, &(0x7f00000000c0)={'#! ', './file0', [{0x20, ']'}, {0x20, '(%,)^{%::@!&'}, {0x20, 'memory.events\x00'}], 0xa, "9e7603e4a1e44175ebc64e957db47ed1f718c3692978415a0c86aad269ac2dd31a63285ec308b16fbbabaf379cc6c6bd168b149e18ffd0eb7fad81ba5cd5f8335f8f6b308fa4444534f76a87147da342c406c4c5eeb95397ba5afb7af2c8bfc6d17fdef928d1c79fe6a996e3771d8d1ee85be0c3a295d1cfc6f70b36002e61917940f26274fdbce095bb1ce5313798c3648d3f98b89453fa45a608bb1d695374e7b792fa2bf0e1f74a780d857bc749833bac1505b3dba0aef34fd736b31cf438da561b79e59b1bd2477afdc66dc851adca87dc9b00348be1fb3efe2795369cceedea726a5aecd19cee3fe5f18c227ab6680d899f00ef89864892155576943e9531f49ff69c6774a2be59026122ed621845d4244a6bed81307e8c7b17f8206cbc8fc2df07634d15fb69133fb1d983c15b3ae5e1b958fff17a23a85a5ec50eff033d456fc27969b568a0461de4feb4114d55d0cc47185bf88225f194f557e4d79872b1289646b4735c6526282506ee16f668ffc46b6d08804c18a5a9e1862eb932679822f0f2130925098b3b67ca0f7d7252fb75bf630a43d872c31637bf8cdf66927c2f189167d1bda16f4d6149d9bf45691c364e513077f7ced0dc8a287eee7f807c929d7161ed12427d8eaf1331c1fa43bc0bff8a4be8001e979b84e5f6199e9a046b62bf6854a339e2967b66660cc90eaaaed7a069e6cfc487508e98f9ccc575c6acf29ea0cd87b1d6209ad4a65179860b1298f2968494008d133096db8c5ab68392d140992af9e48da291f6e81510978db21eb87c2be9dc412c51cb1aad013eeed0c2bf718ebab6bf1a21e0c6666eb00e7d96da87a3df495bb52ac72a02b0c358b46e2b59d0e349748711aad5b5ceb1361a520f880e7fe310f991d134c1305a915e88731f783bf8c7dc46a84deccfbd7c373af0c2ff88d28b46983cc343dd01bd2c0463a75077cfdc4a2ab462cfa46c56421dd1e5ff8b189de94029089f91c547155e2018cf5da164b454a7b69cd7db613f35334331b3094eb38fde75c23a3a351e7ffd472b7dbc3b7dcdbc7db97d2cd99d24f612629a49429eb4e4b9dbf380a1a5dac882613147d20ccc55e27c07728acb5fcd3e4ec1d7880dd4b6c8df2ccfa285f8340e69cc0efd5380f9e78856e2921e9205cb7a2638e2b538c792b602fb09f80c0b89181928c233b97346d0160ec88dd0b148f8d64dcb1491b51e5b9863500419fd4e2a62394cbcd7182a424884e7a3205cad8a4f6fc58b7f3116924591903d7e9f2584789222c1d2159390fc163c4bb68865999ce45226a254a7502a152d3fb0a3a52aa0cfef6f444d4e1098aece0444605b2a766964d2a20f8cfb425695ed89d1454164b240c2ef2e40ff07908ab28366a165e7024b463a8361e8046ffd5232483d683a3e45b57e9f5be71436aff5828a401f9e41d3723c5a9ce34cfa5bf12f8bc8d0030a97a3e78da4fe440fb6eff70a3f7a2f01235c354a4462fc8404283dc0ba51903e6538210578c222e8daa10f431c0e8864aa8239771286235b4ef5ddfa5a6fb308cf5d1b9fd35bc5ada9e19876f159dda17850cbb23f0c45dcbabb2b707fd0aaac6d65e8370797e2a475e8f7b082e22af04c7c53b3a485f8cb17b9e3b986a76edd11568168472629d343aef6faca0e61b9bf748decc295cfeae6ff0a281620505bfe740769bb16013893c41937478574fc851a376e4ffed4f1623c309e313f74eaf0757d78c45aac7871e8fc12e0c52475abd29424f0715999ee1326a2dfdd5dafdf
92ee886f31a681a63842f448c63ac584ddf141e239e9d262b4592adab8ef7b8cfe279f294050a23d78c3148b54e8a1f3d7cdf02d35c38b24141010da9d1b94b67804ece71b3d5ec3395c81f16e5adef2f51696c7b3fd58075a9e566f9acbc3e1783eb63190b2fcf872ecf05df5b8f6a78342d47ac7db665284aa1bd722a0afad44a6ae2e477ed0a81a896abe40a732ba411cb116336784296e1d05194ff12844cd753b3bfb678beaa9beaf33acdf51924a1e131f492132cb4fb98013e43623ed78e7d4bdd9a39778b7b05320ec6d1b6f8f2a25c55479513dacc26862aabec0b95d3361b19783bb948299dd2880ae397570a7930a0aec3cb6ef872297595847377ee728b4ec8796e4a59b8b3b7bc1b0d682e695d89947d3c156dce71ee9952a35c92a40527be7b7d6a715b4555121534e2e0db39a6b96e68fc64a0956f0062963e3060eef34c54d3ea87bb175c2a066dbf0fb65d4eb8b3bb0f58299e92cad9f937194717edce9ad4d89e5df750edc9e361ed2da9a7d8a0dee28feb4c160ff4f9a8f11e6d2bed79256498eea950724c40ce38fea102bcb83f9e6db43bb4f7f0d6adb479474391c422639b13a14a1b39caa7d4588c15f56b581c7a4191ee7b5afaeade9f26fe4f6cb1464c6abd9f085af74b92b27e003b65e53a3273023ca99db8158c731bda7f8ef3a355d444b8c9a0a3d0c10d7e7aa629a75783c1f85965dc090f7d8efba33dd2febadea5230f81317bbc46baabd83d76e95eecdb1ac8cd6bf24ddeba2acdad2da97da8e246c92e51b456c38793e266d4af4074546f4c93b12c9cba6d20e47e407ce8cd92ace7a65b2fcf807f67f49ceb361644a5b8ed3727ff8c9786a977a6d3174f2eacb5f24f2327d85b277ab67ae4c61a6b4c0976589689c5ebdfb1beb20c785b23c8909aa01f327677f3e1b964afca7665a6887b09cdb3c11cf76054e8a8fbd5567cdb2152e720280710f487192239dc59021d5e0659514e8347618735b4f276db8010fd7dd5d32f08163b94b4b494fbe7f9dce7a9f33f31a67596e5fd35e79dc0ee671cbfd531d9e0ade65242c225d573a76943cb4dfd8b2474cd24e364c44342770cc64e7ef0aae0bf839dc66a87fa52dfd36661c0090a3982884611b4f264641ee9e19a7464f4e03b0f6252c10875942f0ce2b539e7878cee911c2e201cd8de56dc6a203b182715fb1dcfa849bf755ae718326c29c30e321bbe306dfb258fcf086d64f834802df5f5104de4e2e8850da09b4f54e28fba7c505f3931e85f3765b0911b9b8d897a9115867b1d1af8ecfbb927a10d465d62d756f73e0e488a42218cf2b281777cb3777a36291893a8540ee51f95fb5c2e397f2f0ec311edfad82a4062fb17e9cb3e6c3cef4a3814a7725227a620f2777ec44d7542190dda293dea814a5f33fac16830477853ae2cc455420c936f0d8682f64337e188167525b0f924f1847d95f5c1d3b09ab2904b714e94d46578213969df3a07adc30a77a0d455007edbc1591b9bbca792b3ac06ef34e7665b938f4299e1780c058ee156745e49e8d7d693590ab894fb593745c577a09ac6a04246293a85b6019e4ba2a6f48e1a8de69963d7e619c8ede3fba7574a126ad9c12ad0fa825ed810b2cd7b8e5091775479b991094943421042b60de128c38bc4fd34ccb81a33e7b209f4cb99f3b68fa890ddfc2d05ede24f5b9419445edee7ea6a39da8104044a20e1ed0c2fe2d6f0b4e75ec913247960e9e579a7e808661423c6bb9191afa71e2deb9146b06fcb3af2c2464024fdf6c82f2ea6c8c8409ed4730c541ce7cbde6f90ac60fc9a022ad21d2df6462dbe9a0747bf0aaa50417eb30672f7edded479194547c07782b25cdeb5d1e35df46871e575ac52879f7398ce31f818def09ab201538d8ff99394a0134e730b78b12c7a8f6e5e5841072be805b32ca32490d43f3d52204c9c1a18ba17979cfec146d41b35399f54a63a91476dfde458697ce21fac512405a0a9bbbe3144cb5baf35ff7299e783158e73f3f4b074472ef38880c631d33c60d62aba072a683f2d6fe4ef202ef581b23e7b0f5ead97eb0cf7cf3b47f9a07fc98f4ed41dd7454f626e5aaefa096a7ec6cfe4b4f0878bf43c4148dda0d9b0132762072f7e16b54f52391b0c40cca49908ac4fad6b846293cf9cad87c55dcf6d3cff431a721bc7258dcb89ab4348a5c9b504a4ee011fe1b3195264673dc9060ad0f8a5286bcf16018ae1a90d30b5f88f8fd51ef5148474d6c45adcb8a5e7379eaa3ca0dd5e1a18812c039068cb0989a73bbccde8d6acb6ad8091fba76fa270e664cd76c8150cf737aa391a62a7efc814b0881d58ad9e8d1fbe382b161e22e30c99dc570ef657dcc9b5b175089eb73f04c01c8fde93148ce84aae3927777e9f66746bf2d23ded985ada87fe3115c18b1a72265dcb54cdcaba8556aa6a9531cd312459330929dc84314ed4edd90425ed9c2710d9513ef79860106cf0b9771abc37fa5520762c44f14595358a9c1fc1
1c7621089adabb4313ea1c1a534ee402c6995492a0062d1425309ffc929b7a14a95376714e11f25c8600f00edf9de5642a82c5e4e77beec5f7407c2c86ddc827ae8fb2b8f10b4c9f95d1931c91402b5ea199d63044ea301746cb26e948d370d515ee57727d3fa1c1166cae03df6ed86bb5eaa3c5cee8bae3caacf3be5095beb7617b90b51ef58b2b8fff15791d937bfd37988c334a6a375360607da290ad0b83cf4594394fefdd384ff598b65c3719f244cf0e63a12fe1e19798510bdab3b6d5ccf7b78b97ead7f11a4b7f6b5c2d6e77e1c7b68c3f07e2fc6ab97bcc2ac5afdec121e75efa8a6717c0ecaabbc6fd755559f33cc0dc878283b2f41f565c63535eadd1b66e492536a08c10392dc640cdf2fd7d5c4bc291f513723159f0f9737dd98478d7dfa1fea3c4ff22385f37da951a9462d2c5556bf947abf86f5aadd9975fe2f152aaa9d7f36aba2381c4fa27b7784ac5e1b9d87f21f1eef80abe28d20ccff7aa4615edb06e92ab0657d4711e31403242ddaa9cff5b78e6a43827bf12b77e257aef48b3cff8ac96d40f58158e5ec4b2326435ed7ab4a12d0ad0be865e49647cb76dab385eeebc3a24cb4a5b4684331279723506adf9e5c97c09391f2740a399708bcc4c87dc989f183a5cb34a6637cc31b52e43bcf7df28b48489de041d368c924fc3e4eca6601c18e1a6e238e892eb6afe3e5df0d233e1769e1fc455a4b45a2a0fccaf7fac1f6bfda2488dc3d1e2c5123db9408702defb6698376eec12590d6fa3aa07176fbfce0787f46b9ea53b1aa8c21ad8dbdb27f63df716383c0082a6bb83bc16845381b7cc55601424e45387dd468320c61cf4a05f691c6119e8c3fe6bde8c8483b235ebfb025dfd0cd4c3695c4cf61c1ac493d34af2fa7feb22ea93b8c41bcc8f5bd179e6a317c5a486e200eb5fd6c49285ffed13008635dda60d4464fb65412a1a5a72c027620ea1bceb0e52e7b29568763577611e142ddc2baa2169fbff74a86f1052678fd8a7e782f69e4d364ab5077b974582d68ae261f1c7a35cf6c423ded877de4e3d921d49f144cf18d58a3e1c3006d089bb8243edbcb7e87fa44e1e2b2922398a2cfa56ee47e973688db0fc816133b7bcf97d0c44a9aeda7c9dd5a418782d1a7ad1f454dd53f410c3ebe2b1ea13ca7949c3b2ca1b7d765cf207cc953ff39c1490f9be03ad816a75e38c6033a9081685c712b1d3678382254e4b9fa85776a904e97d3f6d3f0ce6414aa7911e89a7347745ebfbd17ae6694c312bfef4cfdfb3b60baa2ac504f5b74d767e1987b5d6f7c9e94150e15cf469d36395f9aec80d40236ee80aaded55e0d3361ec3d4732bf9428d7e40440b71a4954478c573040b114bdf358f225f2c5decff1488fec02155b49a9f3eeb5f65b8618d95fa1aada4127b44daec194136a40a1792e44ee87ff582af0cc3e066be7dfc3cb40de92cd86a3b9bd416c8d820df215ab4aaec1810f29e32b34c119781e15a0f24da4"}, 0x1029) r3 = openat$cgroup_ro(r0, &(0x7f0000001100)='memory.swap.events\x00', 0x0, 0x0) bpf$BPF_PROG_TEST_RUN(0xa, &(0x7f0000001480)={r3, 0x0, 0x19, 0xd7, &(0x7f0000001140)="ef4a0087203e0966087d0cf584a2fe35ae0a5fbb2ae58e15b3", &(0x7f0000001180)=""/215, 0x6, 0x0, 0xe1, 0xfb, &(0x7f0000001280)="3b861eb8485a02bdfdfc87414381fa085b55a58a463078c26bb26720fad10ec1b7f881a45e4e4aea87e49653c0f4a579a86b5f1120e5c04dacfe7d5f1bd21d266d587ac72d12294addb478eb42f495585c5b08c4729e6b6cfcbbcbdaf8c8b491496b1445ca1b2a35786a917f444db7b4d28cedb00d24a84e03797de6c3ec9a57a5539b016b11ad1184aa19b3c67a254d362416693c1545a9ca09d81358bcb10b9cf4e8b08f57cd40e6932e2b5aa110b759d41bcc0e3ef49765f524fcb49bff1f2e47242492d5a29535b28636a2e0b6b33949e103e9059d9d1b4607f73227cfc1eb", &(0x7f0000001380)="2d763deb05b23c5ff1f3111e9f9c6c2f96bfac5fcff6f2aa9aeed2493b86a86d885994e80e699ad6f4fad17ac6b91e54412469cbd311380310da51be52f5434d54238aba4aed840ecd1a175b97f63a8de31d7a698cc48b2af0fb2046307fd02f319cae134ce6c0c76fb7b358fe83ffd800f31664db5f1dd55d6d11ccd0957d5ca82b99f1e3664c1972cbbe888217cf28ef432d13cfecb205dd040bce623820b1c3cd13f54bfeb4dea06699eaa701c872694ca9296ef327b847dc883b1d7dc14ca39caf31299f0b0f01d8678fe0fa011c127c951473a62756033ea5506e741d70139b9524d71709e2f00d8d2512f426f315da658f984a0ed0df95cf"}, 0x48) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) sendfile(0xffffffffffffffff, r4, 0x0, 0x8000000000004) 
write$cgroup_netprio_ifpriomap(r4, &(0x7f0000001500)={'hsr0', 0x32, 0x37}, 0x7) [ 2213.383833][ T9176] 8021q: adding VLAN 0 to HW filter on device bond1431 [ 2213.436906][ T9177] bond1431: (slave bridge1288): making interface the new active one [ 2213.450278][ T9177] bond1431: (slave bridge1288): Enslaving as an active interface with an up link 10:42:29 executing program 1: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f00000001c0)={0x0, 0x0, &(0x7f0000000180)={0x0, 0x24}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000200)=0x14) sendmsg$nl_route(r1, &(0x7f0000000140)={0x0, 0x0, &(0x7f00000003c0)={&(0x7f0000000040)=ANY=[@ANYBLOB="3c00000010008506000000000000000000002000", @ANYRES32=r3, @ANYBLOB="5977f29225186fb21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f00000000c0)={0x0, 0x3b899c8c, &(0x7f0000000300)={&(0x7f0000000240)=@newlink={0x3c, 0x10, 0xffffff1f, 0x0, 0x0, {0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, [@IFLA_LINKINFO={0x14, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x4}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x3c}}, 0x0) 10:42:29 executing program 4: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) writev(r0, &(0x7f0000000280)=[{&(0x7f0000000080)="218f2637db39c0d8ff8e3e2d1823ce173d7a56d78f715990e3267e785cc758f3d14250b8fd5bf3a2715392ab47e5f51aeb597d522e167fedd038b3bc43373dac615f1a0fde144d66489c286c5682291a25f7b88c4c3a4f3490e8bffdaf1ac9f1306b8b7101f4220bd27b5aa388732dd638c5168d5be7a0971847d94ddba03ab08b408bc30ce7ba1075345e3d40b32f60d495e1a6be13410a8539adaf18d9b25f81f024448fda2fdb73eb0784cf7f785efc956873760664057c92baca8ea8b14ac865ae2e75b7044361a416ca7e2b02fc70809974cffcaba0d9f8fb03", 0xdc}, {&(0x7f0000000180)="923883a4503b25156bd9aa290182ec83e9de9105aefd3066618d75c32c4266d0663c5b259554f949dd5ff9ab5d30f3842f5f0c6c0be93f957a8eff7228b22e0f430c390647ab14af1c797b079a42b0377e23f5078c7f9092e5d18e67b61a46b99cca876623d88735b2164faac31f3e382e75fb963775f17c34c5ae08b78d1323f9ee4522c16723cbf15182d810ff903f5b4cfdf38bf6cb7db34874060ffc828a3d085c69fb11f62a64c653f2376b6551ae1fb3cb14fb22cbd1943ddffdbddb9b2b2473323c64dac59f02ad3c2f4e", 0xce}], 0x2) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000000000)=0xfffffffffffffc03) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) (async) writev(r0, &(0x7f0000000280)=[{&(0x7f0000000080)="218f2637db39c0d8ff8e3e2d1823ce173d7a56d78f715990e3267e785cc758f3d14250b8fd5bf3a2715392ab47e5f51aeb597d522e167fedd038b3bc43373dac615f1a0fde144d66489c286c5682291a25f7b88c4c3a4f3490e8bffdaf1ac9f1306b8b7101f4220bd27b5aa388732dd638c5168d5be7a0971847d94ddba03ab08b408bc30ce7ba1075345e3d40b32f60d495e1a6be13410a8539adaf18d9b25f81f024448fda2fdb73eb0784cf7f785efc956873760664057c92baca8ea8b14ac865ae2e75b7044361a416ca7e2b02fc70809974cffcaba0d9f8fb03", 0xdc}, {&(0x7f0000000180)="923883a4503b25156bd9aa290182ec83e9de9105aefd3066618d75c32c4266d0663c5b259554f949dd5ff9ab5d30f3842f5f0c6c0be93f957a8eff7228b22e0f430c390647ab14af1c797b079a42b0377e23f5078c7f9092e5d18e67b61a46b99cca876623d88735b2164faac31f3e382e75fb963775f17c34c5ae08b78d1323f9ee4522c16723cbf15182d810ff903f5b4cfdf38bf6cb7db34874060ffc828a3d085c69fb11f62a64c653f2376b6551ae1fb3cb14fb22cbd1943ddffdbddb9b2b2473323c64dac59f02ad3c2f4e", 0xce}], 0x2) (async) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000000000)=0xfffffffffffffc03) (async) 10:42:29 executing program 2: r0 = 
openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='memory.events\x00', 0x275a, 0x0) ioctl$EXT4_IOC_GROUP_EXTEND(r0, 0x40086607, &(0x7f0000001d40)=0xfffffffffffffbff) r1 = socket$inet_dccp(0x2, 0x6, 0x0) ioctl$FIDEDUPERANGE(r1, 0xc0189436, &(0x7f0000000080)={0x4, 0x931, 0x1, 0x0, 0x0, [{{r0}, 0x1}]}) ioctl$AUTOFS_IOC_EXPIRE_MULTI(r0, 0x40049366, &(0x7f0000000000)=0x1) (async) r2 = bpf$PROG_LOAD_XDP(0x5, &(0x7f0000000080)={0x6, 0x4, &(0x7f0000001540)=ANY=[@ANYBLOB="18020000000000000000000000000000850000007b00000095000000000000003a64d725989c4db16f3baf71e8e1e0e0fda53265e4986a8c0c5df4890b46f7846cb7e88700f70c9f5bf25e11ea397ef5a10816d06d8cf70fb1ce0f722145d23429d9e7cfb38f06d6534257aa696d61d62cf6a1a78b21361809b6a0395915bdb64fb487cb0fad8a71b0bbd7cd3d179cd3a2adb6b26e449c1d44b4b691a0aa23604699a34918c7d93cf7f4f3bdb02390cab447693161cfe5ce5e07eae50ceb073b257290594a250afbbfdd2d0fedeca5db427aaf183a78f82a56aa9868077e2bb77d396224335038f286ca71f5d1b5dc035167eb7c76d84b5a07fea66f1738894a4ea1ccc9e9049ef948b9e97de4f14e721fa1ba5af84ad4"], &(0x7f0000000100)='GPL\x00', 0x0, 0x0, 0x0, 0x0, 0x0, '\x00', 0x0, 0x25, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x80) bpf$BPF_PROG_TEST_RUN(0xa, &(0x7f0000000000)={r2, 0xf000, 0x0, 0x41, 0x0, 0x0, 0x41, 0x0, 0x0, 0x0, 0x0, 0x0}, 0x48) (async) ioctl$F2FS_IOC_MOVE_RANGE(r2, 0xc020f509, &(0x7f0000000140)={r2}) (async) write$binfmt_script(r2, &(0x7f00000000c0)={'#! ', './file0', [{0x20, ']'}, {0x20, '(%,)^{%::@!&'}, {0x20, 'memory.events\x00'}], 0xa, "9e7603e4a1e44175ebc64e957db47ed1f718c3692978415a0c86aad269ac2dd31a63285ec308b16fbbabaf379cc6c6bd168b149e18ffd0eb7fad81ba5cd5f8335f8f6b308fa4444534f76a87147da342c406c4c5eeb95397ba5afb7af2c8bfc6d17fdef928d1c79fe6a996e3771d8d1ee85be0c3a295d1cfc6f70b36002e61917940f26274fdbce095bb1ce5313798c3648d3f98b89453fa45a608bb1d695374e7b792fa2bf0e1f74a780d857bc749833bac1505b3dba0aef34fd736b31cf438da561b79e59b1bd2477afdc66dc851adca87dc9b00348be1fb3efe2795369cceedea726a5aecd19cee3fe5f18c227ab6680d899f00ef89864892155576943e9531f49ff69c6774a2be59026122ed621845d4244a6bed81307e8c7b17f8206cbc8fc2df07634d15fb69133fb1d983c15b3ae5e1b958fff17a23a85a5ec50eff033d456fc27969b568a0461de4feb4114d55d0cc47185bf88225f194f557e4d79872b1289646b4735c6526282506ee16f668ffc46b6d08804c18a5a9e1862eb932679822f0f2130925098b3b67ca0f7d7252fb75bf630a43d872c31637bf8cdf66927c2f189167d1bda16f4d6149d9bf45691c364e513077f7ced0dc8a287eee7f807c929d7161ed12427d8eaf1331c1fa43bc0bff8a4be8001e979b84e5f6199e9a046b62bf6854a339e2967b66660cc90eaaaed7a069e6cfc487508e98f9ccc575c6acf29ea0cd87b1d6209ad4a65179860b1298f2968494008d133096db8c5ab68392d140992af9e48da291f6e81510978db21eb87c2be9dc412c51cb1aad013eeed0c2bf718ebab6bf1a21e0c6666eb00e7d96da87a3df495bb52ac72a02b0c358b46e2b59d0e349748711aad5b5ceb1361a520f880e7fe310f991d134c1305a915e88731f783bf8c7dc46a84deccfbd7c373af0c2ff88d28b46983cc343dd01bd2c0463a75077cfdc4a2ab462cfa46c56421dd1e5ff8b189de94029089f91c547155e2018cf5da164b454a7b69cd7db613f35334331b3094eb38fde75c23a3a351e VM DIAGNOSIS: Warning: Permanently added '10.128.0.235' (ECDSA) to the list of known hosts. 
lock-classes: 6364 [max: 8192]
direct dependencies: 46242 [max: 131072]
indirect dependencies: 796628
all direct dependencies: 2179685
dependency chains: 259893 [max: 262144]
dependency chain hlocks used: 1310717 [max: 1310720]
dependency chain hlocks lost: 1
in-hardirq chains: 97
in-softirq chains: 2325
in-process chains: 257470
stack-trace entries: 274722 [max: 1048576]
number of stack traces: 14269
number of stack hash chains: 9596
combined max dependencies:
hardirq-safe locks: 58
hardirq-unsafe locks: 5659
softirq-safe locks: 304
softirq-unsafe locks: 5309
irq-safe locks: 319
irq-unsafe locks: 5659
hardirq-read-safe locks: 5
hardirq-read-unsafe locks: 196
softirq-read-safe locks: 21
softirq-read-unsafe locks: 180
irq-read-safe locks: 21
irq-read-unsafe locks: 196
uncategorized locks: 389
unused locks: 0
max locking depth: 30
max bfs queue depth: 596
max lock class index: 6363
debug_locks: 0
zapped classes: 3323
zapped lock chains: 7651
large chain blocks: 0
all lock classes:
FD: 37 BD: 1 +.+.: fill_pool_map-wait-type-override ->&____s->seqcount ->pool_lock#2 ->&c->lock ->pool_lock ->&zone->lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->rcu_node_0 ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&____s->seqcount#2 ->init_task.mems_allowed_seq.seqcount ->&rcu_state.expedited_wq
FD: 2 BD: 5126 -.-.: &obj_hash[i].lock ->pool_lock
FD: 1 BD: 5126 -.-.: pool_lock
FD: 754 BD: 15 +.+.: cgroup_mutex ->pcpu_alloc_mutex ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->&obj_hash[i].lock ->cgroup_file_kn_lock ->css_set_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->blkcg_pol_mutex ->&n->list_lock ->percpu_counters_lock ->shrinker_rwsem ->&base->lock ->batched_entropy_u8.lock ->&pgdat->memcg_lru.lock ->devcgroup_mutex ->cpu_hotplug_lock ->fs_reclaim ->&rq->__lock ->cgroup_rstat_lock ->cpuset_rwsem ->cpuset_rwsem.waiters.lock ->cpuset_rwsem.rss.gp_wait.lock ->&dom->lock ->batched_entropy_u32.lock ->cgroup_idr_lock ->task_group_lock ->(wq_completion)cpuset_migrate_mm ->&wq->mutex ->&____s->seqcount#2 ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->stock_lock ->cgroup_mutex.wait_lock
FD: 1 BD: 4087 -...: (console_sem).lock
FD: 198 BD: 12 +.+.: console_lock ->console_owner_lock ->resource_lock ->pool_lock#2 ->&obj_hash[i].lock ->&zone->lock ->&____s->seqcount ->&c->lock ->kbd_event_lock ->vga_lock ->(console_sem).lock ->fs_reclaim ->&x->wait#9 ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#11 ->&fb_info->lock ->&base->lock ->subsys mutex#5 ->&helper->lock ->&helper->damage_lock ->&lock->wait_lock ->&p->pi_lock ->&rq->__lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->vt_event_lock ->&meta->lock
FD: 1 BD: 11 ....: console_srcu
FD: 265 BD: 107 ++++: cpu_hotplug_lock ->jump_label_mutex ->static_call_mutex ->cpuhp_state_mutex ->wq_pool_mutex ->freezer_mutex ->rcu_tasks_trace__percpu.cbs_pcpu_lock ->&ACCESS_PRIVATE(rtpcp, lock) ->smpboot_threads_lock ->&obj_hash[i].lock ->&pool->lock ->&x->wait#4 ->&rq->__lock ->mem_hotplug_lock ->mem_hotplug_lock.waiters.lock ->mem_hotplug_lock.rss.gp_wait.lock ->cpu_hotplug_lock.rss.gp_wait.lock ->rcu_node_0 ->&swhash->hlist_mutex ->pmus_lock ->pcp_batch_high_lock ->&xa->xa_lock ->fs_reclaim ->pool_lock#2 ->kthread_create_lock ->&p->pi_lock ->&x->wait ->wq_pool_attach_mutex ->pcpu_alloc_mutex ->relay_channels_mutex ->&c->lock
->&n->list_lock ->&zone->lock ->&____s->seqcount ->sparse_irq_lock ->&x->wait#6 ->cpuhp_state-up ->stop_cpus_mutex ->&wq->mutex ->flush_lock ->&md->mutex ->&irq_desc_lock_class ->xps_map_mutex ->css_set_lock ->cpuset_rwsem ->cpuset_rwsem.waiters.lock ->cpuset_rwsem.rss.gp_wait.lock ->cgroup_threadgroup_rwsem ->cgroup_threadgroup_rwsem.waiters.lock ->cgroup_threadgroup_rwsem.rss.gp_wait.lock ->&list->lock#5 ->(work_completion)(flush) ->&x->wait#10 ->jump_label_mutex.wait_lock FD: 66 BD: 115 +.+.: jump_label_mutex ->text_mutex ->&rq->__lock ->text_mutex.wait_lock ->&p->pi_lock ->jump_label_mutex.wait_lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 65 BD: 108 +.+.: static_call_mutex ->text_mutex ->text_mutex.wait_lock ->&p->pi_lock ->&rq->__lock FD: 64 BD: 128 +.+.: text_mutex ->ptlock_ptr(page)#2 ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq ->text_mutex.wait_lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 204 BD: 10 +.+.: console_mutex ->syslog_lock ->(console_sem).lock ->&port_lock_key ->console_lock ->console_srcu_srcu_usage.lock ->&ACCESS_PRIVATE(sdp, lock) ->console_srcu ->&root->kernfs_rwsem ->kernfs_notify_lock ->&rq->__lock FD: 28 BD: 11 +.+.: syslog_lock ->&rq->__lock FD: 1 BD: 4087 -...: console_owner_lock FD: 33 BD: 4086 -...: console_owner ->console_owner_lock ->&port_lock_key FD: 1 BD: 136 ..-.: input_pool.lock FD: 217 BD: 108 +.+.: cpuhp_state_mutex ->cpuhp_state-down ->cpuhp_state-up ->&p->pi_lock ->&x->wait#6 ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->fs_reclaim ->lock ->&root->kernfs_rwsem ->&c->lock ->&zone->lock ->&____s->seqcount ->&n->list_lock ->crypto_alg_sem ->scomp_lock FD: 267 BD: 1 +.+.: clocksource_mutex ->watchdog_lock ->cpu_hotplug_lock ->(console_sem).lock FD: 1 BD: 2 ....: watchdog_lock FD: 4 BD: 111 ++++: resource_lock ->pool_lock#2 ->&obj_hash[i].lock FD: 1 BD: 1 ....: cache_disable_lock FD: 1 BD: 4382 +.+.: pgd_lock FD: 30 BD: 262 +.+.: init_mm.page_table_lock ->pgd_lock ->&obj_hash[i].lock FD: 1 BD: 1 ....: early_pfn_lock FD: 143 BD: 1 +.+.: acpi_ioapic_lock ->ioapic_lock ->(console_sem).lock ->ioapic_mutex FD: 2 BD: 128 ....: ioapic_lock ->i8259A_lock FD: 1 BD: 1 +.+.: syscore_ops_lock FD: 1 BD: 1 ....: map_entries_lock FD: 1 BD: 7 ....: devtree_lock FD: 3 BD: 4413 ..-.: pcpu_lock ->stock_lock FD: 79 BD: 65 +.+.: param_lock ->rate_ctrl_mutex ->disk_events_mutex FD: 1 BD: 5098 ..-.: base_crng.lock FD: 1 BD: 1 ....: rcu_read_lock FD: 1 BD: 1 ....: crng_init_wait.lock FD: 2 BD: 1 ....: zonelist_update_seq ->zonelist_update_seq.seqcount FD: 1 BD: 2 ....: zonelist_update_seq.seqcount FD: 1 BD: 1 +.+.: dmar_global_lock FD: 2 BD: 5015 -.-.: &zone->lock ->&____s->seqcount FD: 1 BD: 5074 .-.-: &____s->seqcount FD: 3 BD: 5 +.+.: &pcp->lock ->&zone->lock FD: 1 BD: 5146 -.-.: pool_lock#2 FD: 81 BD: 190 +.+.: pcpu_alloc_mutex ->pcpu_lock ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&zone->lock ->&____s->seqcount ->init_mm.page_table_lock ->&c->lock ->&rq->__lock ->pcpu_alloc_mutex.wait_lock ->&obj_hash[i].lock ->&cfs_rq->removed.lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&pool->lock ->&n->list_lock ->remove_cache_srcu ->pgd_lock ->stock_lock ->key ->percpu_counters_lock FD: 6 BD: 5042 -.-.: &n->list_lock ->&c->lock FD: 5 BD: 5082 -.-.: &c->lock ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 124 BD: 78 +.+.: slab_mutex ->pool_lock#2 ->&c->lock ->&n->list_lock ->pcpu_alloc_mutex ->&zone->lock ->&____s->seqcount ->fs_reclaim ->&rq->__lock ->remove_cache_srcu ->&obj_hash[i].lock 
->batched_entropy_u8.lock ->kfence_freelist_lock ->lock ->&root->kernfs_rwsem ->&k->list_lock ->&____s->seqcount#2 FD: 3 BD: 5 ....: batched_entropy_u64.lock ->crngs.lock FD: 2 BD: 5097 ..-.: crngs.lock ->base_crng.lock FD: 4 BD: 1 ....: espfix_init_mutex ->&zone->lock ->&____s->seqcount ->pool_lock#2 FD: 1 BD: 4384 ..-.: percpu_counters_lock FD: 9 BD: 4451 +.+.: &mm->page_table_lock ->&obj_hash[i].lock ->pool_lock#2 ->&meta->lock ->kfence_freelist_lock ->stock_lock ->quarantine_lock FD: 10 BD: 4455 +.+.: ptlock_ptr(page) ->lock#4 FD: 60 BD: 4471 +.+.: ptlock_ptr(page)#2 ->lock#4 ->ptlock_ptr(page)#2/1 ->key ->&____s->seqcount ->pool_lock#2 ->lock#5 ->&obj_hash[i].lock ->&lruvec->lru_lock ->&mapping->private_lock ->&folio_wait_table[i] ->&p->lock#2 ->mmlist_lock ->&cache->free_lock ->&xa->xa_lock#19 FD: 94 BD: 3 +.+.: trace_types_lock ->fs_reclaim ->pool_lock#2 ->pin_fs_lock ->&sb->s_type->i_mutex_key#5 ->&obj_hash[i].lock FD: 1 BD: 1 ....: panic_notifier_list.lock FD: 1 BD: 1 ....: die_chain.lock FD: 96 BD: 4 +.+.: trace_event_sem ->trace_event_ida.xa_lock ->&rq->__lock ->trace_event_sem.wait_lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->fs_reclaim ->batched_entropy_u8.lock ->kfence_freelist_lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#5 ->&c->lock ->&zone->lock ->&____s->seqcount FD: 3 BD: 4132 ..-.: batched_entropy_u32.lock ->crngs.lock FD: 27 BD: 4962 -.-.: &rq->__lock ->&per_cpu_ptr(group->pcpu, cpu)->seq ->&obj_hash[i].lock ->&base->lock ->&rq->__lock/1 ->&cfs_rq->removed.lock ->&rt_b->rt_runtime_lock ->&cp->lock ->pool_lock#2 ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->cid_lock FD: 1 BD: 4963 ....: &cfs_b->lock FD: 28 BD: 113 ....: init_task.pi_lock ->&rq->__lock FD: 1 BD: 1 ....: init_task.vtime_seqcount FD: 93 BD: 112 +.+.: wq_pool_mutex ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&wq->mutex ->&obj_hash[i].lock ->&pool->lock/1 ->fs_reclaim ->kthread_create_lock ->&p->pi_lock ->&rq->__lock ->&x->wait ->wq_pool_attach_mutex ->(console_sem).lock ->&n->list_lock ->&xa->xa_lock ->&____s->seqcount#2 ->remove_cache_srcu ->rcu_node_0 ->&cfs_rq->removed.lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->quarantine_lock FD: 33 BD: 123 +.+.: &wq->mutex ->&pool->lock ->&pool->lock/1 ->&x->wait#10 ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 30 BD: 4690 -.-.: &pool->lock ->&obj_hash[i].lock ->&p->pi_lock ->pool_lock#2 ->&base->lock ->wq_mayday_lock FD: 32 BD: 4563 ..-.: &pool->lock/1 ->&obj_hash[i].lock ->&p->pi_lock ->pool_lock#2 ->&base->lock ->wq_mayday_lock ->&x->wait#10 FD: 103 BD: 59 ++++: shrinker_rwsem ->pool_lock#2 ->&c->lock ->&n->list_lock ->&zone->lock ->&____s->seqcount ->fs_reclaim ->&rq->__lock FD: 1 BD: 4543 -.-.: rcu_node_0 FD: 4 BD: 68 -.-.: rcu_state.barrier_lock ->rcu_node_0 ->&obj_hash[i].lock FD: 31 BD: 3 ....: &rnp->exp_poll_lock FD: 9 BD: 5 ....: trace_event_ida.xa_lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&c->lock FD: 1 BD: 1 ....: trigger_cmd_mutex FD: 1 BD: 129 ....: i8259A_lock FD: 77 BD: 109 +.+.: irq_domain_mutex ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock FD: 33 BD: 261 +.+.: free_vmap_area_lock ->&obj_hash[i].lock ->pool_lock#2 ->init_mm.page_table_lock ->quarantine_lock ->&meta->lock ->kfence_freelist_lock ->&____s->seqcount FD: 1 BD: 262 +.+.: vmap_area_lock FD: 6 BD: 125 -.-.: &irq_desc_lock_class ->i8259A_lock ->vector_lock ->ioapic_lock ->mask_lock ->tmp_mask_lock FD: 37 BD: 72 +.+.: vmap_purge_lock ->purge_vmap_area_lock ->free_vmap_area_lock ->&rq->__lock 
->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq FD: 8 BD: 82 +.+.: purge_vmap_area_lock ->&obj_hash[i].lock ->pool_lock#2 ->&____s->seqcount ->quarantine_lock ->&meta->lock ->kfence_freelist_lock FD: 2 BD: 71 +.+.: cpa_lock ->pgd_lock FD: 5 BD: 2 -.-.: timekeeper_lock ->tk_core.seq.seqcount ->pvclock_gtod_data FD: 4 BD: 5007 ----: tk_core.seq.seqcount ->&obj_hash[i].lock ->pvclock_gtod_data FD: 13 BD: 5017 -.-.: &base->lock ->&obj_hash[i].lock FD: 154 BD: 110 +.+.: pmus_lock ->pcpu_alloc_mutex ->&obj_hash[i].lock ->pool_lock#2 ->&cpuctx_mutex ->fs_reclaim ->&k->list_lock ->lock ->&root->kernfs_rwsem ->uevent_sock_mutex ->running_helpers_waitq.lock ->&c->lock ->&____s->seqcount ->&x->wait#9 ->&zone->lock ->bus_type_sem ->sysfs_symlink_target_lock ->&k->k_lock ->&dev->power.lock ->dpm_list_mtx ->subsys mutex#29 FD: 1 BD: 110 +.+.: &swhash->hlist_mutex FD: 1 BD: 111 +.+.: &cpuctx_mutex FD: 1 BD: 2 ....: tty_ldiscs_lock FD: 2 BD: 13 ....: kbd_event_lock ->led_lock FD: 1 BD: 14 ..-.: led_lock FD: 1 BD: 13 ....: vga_lock FD: 31 BD: 4089 -...: &port_lock_key ->&port->lock ->&tty->write_wait FD: 3 BD: 11 ....: console_srcu_srcu_usage.lock ->&obj_hash[i].lock FD: 1 BD: 32 ..-.: &ACCESS_PRIVATE(sdp, lock) FD: 43 BD: 3 +.+.: init_task.alloc_lock ->init_fs.lock FD: 37 BD: 1 +.+.: acpi_ioremap_lock ->pool_lock#2 ->resource_lock ->memtype_lock ->free_vmap_area_lock ->vmap_area_lock FD: 1 BD: 2 +.+.: memtype_lock FD: 1 BD: 17 ....: semaphore->lock FD: 1 BD: 13 ....: *(&acpi_gbl_reference_count_lock) FD: 9 BD: 1 ....: clockevents_lock ->tk_core.seq.seqcount ->tick_broadcast_lock ->i8253_lock FD: 3 BD: 2 -...: tick_broadcast_lock ->jiffies_lock FD: 1 BD: 2 ....: i8253_lock FD: 37 BD: 12 +.+.: &desc->request_mutex ->&irq_desc_lock_class ->&rq->__lock ->proc_subdir_lock ->&ent->pde_unload_lock ->proc_inum_ida.xa_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 142 BD: 2 +.+.: ioapic_mutex ->&domain->mutex FD: 141 BD: 111 +.+.: &domain->mutex ->pool_lock#2 ->vector_lock ->&irq_desc_lock_class ->i8259A_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->sparse_irq_lock ->fs_reclaim ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 1 BD: 128 -.-.: vector_lock FD: 19 BD: 424 +.+.: sysctl_lock ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock FD: 39 BD: 2 +.+.: tomoyo_policy_lock ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->&c->lock ->&____s->seqcount ->&rq->__lock ->tomoyo_policy_lock.wait_lock ->&n->list_lock FD: 2 BD: 1 ....: aa_secids.xa_lock ->pool_lock#2 FD: 1 BD: 2 +.+.: aa_buffers_lock FD: 2 BD: 3 -.-.: jiffies_lock ->jiffies_seq.seqcount FD: 1 BD: 4 -.-.: jiffies_seq.seqcount FD: 16 BD: 4983 -.-.: hrtimer_bases.lock ->tk_core.seq.seqcount ->&obj_hash[i].lock FD: 29 BD: 1 -.-.: log_wait.lock ->&p->pi_lock FD: 881 BD: 4 ++++: pernet_ops_rwsem ->stack_depot_init_mutex ->crngs.lock ->net_rwsem ->proc_inum_ida.xa_lock ->pool_lock#2 ->proc_subdir_lock ->fs_reclaim ->&____s->seqcount ->&c->lock ->sysctl_lock ->pcpu_alloc_mutex ->net_generic_ids.xa_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#8 ->&dir->lock ->&obj_hash[i].lock ->k-sk_lock-AF_NETLINK ->k-slock-AF_NETLINK ->nl_table_lock ->nl_table_wait.lock ->rtnl_mutex ->uevent_sock_mutex ->&zone->lock ->&net->rules_mod_lock ->slab_mutex ->batched_entropy_u32.lock ->percpu_counters_lock ->cache_list_lock ->rcu_node_0 ->&rq->__lock ->tk_core.seq.seqcount ->&k->list_lock ->lock ->&root->kernfs_rwsem ->&pool->lock/1 ->running_helpers_waitq.lock ->&sn->pipefs_sb_lock ->krc.lock 
->&s->s_inode_list_lock ->nf_hook_mutex ->cpu_hotplug_lock ->hwsim_netgroup_ida.xa_lock ->nf_connlabels_lock ->nf_ct_ecache_mutex ->nf_log_mutex ->batched_entropy_u8.lock ->kfence_freelist_lock ->ipvs->est_mutex ->&base->lock ->__ip_vs_app_mutex ->&hashinfo->lock#2 ->&net->ipv6.ip6addrlbl_table.lock ->(console_sem).lock ->k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->k-clock-AF_INET6 ->wq_pool_mutex ->pcpu_lock ->&list->lock#4 ->&dir->lock#2 ->ptype_lock ->k-clock-AF_TIPC ->k-sk_lock-AF_TIPC ->k-slock-AF_TIPC ->&this->receive_lock ->once_lock ->nf_ct_proto_mutex ->k-sk_lock-AF_RXRPC ->k-slock-AF_RXRPC ->&rxnet->conn_lock ->&call->waitq ->&rx->call_lock ->&rxnet->call_lock ->&n->list_lock ->rtnl_mutex.wait_lock ->&p->pi_lock ->rdma_nets.xa_lock ->devices_rwsem ->stock_lock ->&____s->seqcount#2 ->&sem->wait_lock ->&net->nsid_lock ->ebt_mutex ->&xt[i].mutex ->&nft_net->commit_mutex ->netns_bpf_mutex ->&rnp->exp_wq[2] ->(&net->fs_probe_timer) ->&net->cells_lock ->(&net->cells_timer) ->(&net->fs_timer) ->bit_wait_table + i ->(wq_completion)kafsd ->&wq->mutex ->k-clock-AF_RXRPC ->&local->services_lock ->(wq_completion)krxrpcd ->rlock-AF_RXRPC ->&x->wait ->&xa->xa_lock#7 ->&fsnotify_mark_srcu ->&ent->pde_unload_lock ->ovs_mutex ->(work_completion)(&(&ovs_net->masks_rebalance)->work) ->(work_completion)(&ovs_net->dp_notify_work) ->&srv->idr_lock ->&rnp->exp_lock ->&rnp->exp_wq[0] ->(work_completion)(&tn->work) ->&rnp->exp_wq[1] ->&tn->nametbl_lock ->&rnp->exp_wq[3] ->(work_completion)(&ht->run_work) ->&ht->mutex ->(work_completion)(&(&c->work)->work) ->(wq_completion)krdsd ->(work_completion)(&rtn->rds_tcp_accept_w) ->rds_tcp_conn_lock ->loop_conns_lock ->(wq_completion)l2tp ->rcu_state.barrier_mutex ->(&rxnet->peer_keepalive_timer) ->(work_completion)(&rxnet->peer_keepalive_work) ->(&rxnet->service_conn_reap_timer) ->&x->wait#10 ->remove_cache_srcu ->dev_base_lock ->lweventlist_lock ->napi_hash_lock ->netdev_unregistering_wq.lock ->&rcu_state.expedited_wq ->&hn->hn_lock ->&pnettable->lock ->&pnetids_ndev->lock ->k-sk_lock-AF_INET6/1 ->&net->sctp.addr_wq_lock ->rcu_state.barrier_mutex.wait_lock ->&fn->fou_lock ->ipvs->sync_mutex ->hwsim_radio_lock ->rdma_nets_rwsem ->k-clock-AF_NETLINK ->&nlk->wait ->wlock-AF_NETLINK ->k-sk_lock-AF_INET ->k-slock-AF_INET ->&cfs_rq->removed.lock ->&sn->gssp_lock ->&cd->hash_lock ->(&net->can.stattimer) ->xfrm_state_gc_work ->&net->xfrm.xfrm_state_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->(work_completion)(&(&net->ipv6.addr_chk_work)->work) ->ip6_fl_lock ->(&net->ipv6.ip6_fib_timer) ->__ip_vs_mutex ->(&ipvs->dest_trash_timer) ->(work_completion)(&(&ipvs->expire_nodest_conn_work)->work) ->(work_completion)(&(&ipvs->defense_work)->work) ->(work_completion)(&(&ipvs->est_reload_work)->work) ->nfnl_subsys_ipset ->recent_lock ->hashlimit_mutex ->(work_completion)(&(&cnet->ecache.dwork)->work) ->sysfs_symlink_target_lock ->kernfs_idr_lock ->&meta->lock ->tcp_metrics_lock ->k-clock-AF_INET ->(work_completion)(&net->xfrm.policy_hash_work) ->&net->xfrm.xfrm_policy_lock ->(work_completion)(&net->xfrm.state_hash_work) ->&list->lock#2 ->genl_sk_destructing_waitq.lock ->rcu_state.exp_mutex.wait_lock ->rcu_state.exp_mutex ->quarantine_lock ->pcpu_alloc_mutex.wait_lock ->pgd_lock ->key ->rdma_nets_rwsem.wait_lock ->&device->compat_devs_mutex ->dev_pm_qos_sysfs_mtx ->subsys mutex#83 ->&x->wait#9 ->dpm_list_mtx ->&dev->power.lock ->deferred_probe_mutex ->device_links_lock ->gdp_mutex ->&device->unregistration_lock ->key#22 FD: 28 BD: 67 +.+.: stack_depot_init_mutex ->&rq->__lock 
FD: 97 BD: 3945 ++++: net_rwsem ->&list->lock#2 ->&rq->__lock ->pool_lock#2 ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&c->lock ->&n->list_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&table->lock#4 ->&ndev->lock ->&cfs_rq->removed.lock FD: 13 BD: 88 ....: proc_inum_ida.xa_lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock ->&n->list_lock ->&____s->seqcount#2 FD: 744 BD: 63 +.+.: rtnl_mutex ->&c->lock ->&____s->seqcount ->pool_lock#2 ->fs_reclaim ->&zone->lock ->pcpu_alloc_mutex ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->pool_lock ->running_helpers_waitq.lock ->subsys mutex#17 ->&dir->lock#2 ->dev_hotplug_mutex ->dev_base_lock ->input_pool.lock ->&rq->__lock ->nl_table_lock ->nl_table_wait.lock ->net_rwsem ->batched_entropy_u32.lock ->&tbl->lock ->sysctl_lock ->krc.lock ->stack_depot_init_mutex ->cpu_hotplug_lock ->kthread_create_lock ->&p->pi_lock ->&x->wait ->wq_pool_mutex ->crngs.lock ->&pool->lock/1 ->&cfs_rq->removed.lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->lweventlist_lock ->&pool->lock ->rtnl_mutex.wait_lock ->proc_subdir_lock ->proc_inum_ida.xa_lock ->&k->k_lock ->param_lock ->(console_sem).lock ->&rdev->wiphy.mtx ->&base->lock ->subsys mutex#55 ->&sdata->sec_mtx ->&local->iflist_mtx#2 ->lock#7 ->failover_lock ->&tn->lock ->&idev->mc_lock ->&ndev->lock ->&pnettable->lock ->smc_ib_devices.mutex ->&(&net->nexthop.notifier_chain)->rwsem ->reg_requests_lock ->reg_pending_beacons_lock ->rlock-AF_NETLINK ->(inetaddr_validator_chain).rwsem ->(inetaddr_chain).rwsem ->_xmit_LOOPBACK ->netpoll_srcu ->&in_dev->mc_tomb_lock ->&im->lock ->fib_info_lock ->cbs_list_lock ->(inet6addr_validator_chain).rwsem ->&net->ipv6.addrconf_hash_lock ->&ifa->lock ->&tb->tb6_lock ->&n->list_lock ->&dev_addr_list_lock_key ->napi_hash_lock ->lapb_list_lock ->remove_cache_srcu ->x25_neigh_list_lock ->console_owner_lock ->console_owner ->_xmit_ETHER ->_xmit_SLIP ->&sem->wait_lock ->&vi->refill_lock ->noop_qdisc.q.lock ->&rfkill->lock ->&local->chanctx_mtx ->&dev->tx_global_lock ->rcu_node_0 ->&rnp->exp_wq[3] ->&sch->q.lock ->class ->(&tbl->proxy_timer) ->_xmit_VOID ->_xmit_X25 ->&lapbeth->up_lock ->&lapb->lock ->&rnp->exp_wq[1] ->&dir->lock ->&ul->lock#2 ->&n->lock ->dev_addr_sem ->_xmit_IEEE802154 ->reg_indoor_lock ->&nr_netdev_addr_lock_key ->listen_lock ->uevent_sock_mutex.wait_lock ->&r->consumer_lock ->&mm->mmap_lock ->quarantine_lock ->pcpu_lock ->(switchdev_blocking_notif_chain).rwsem ->&br->hash_lock ->nf_hook_mutex ->j1939_netdev_lock ->&bat_priv->tvlv.handler_list_lock ->&bat_priv->tvlv.container_list_lock ->&bat_priv->softif_vlan_list_lock ->key#16 ->&bat_priv->tt.changes_list_lock ->pgd_lock ->key ->percpu_counters_lock ->kernfs_idr_lock ->&rnp->exp_wq[0] ->&rnp->exp_wq[2] ->tk_core.seq.seqcount ->&wq->mutex ->init_lock ->&rcu_state.expedited_wq ->&meta->lock ->deferred_lock ->target_list_lock ->&br->lock ->&pn->hash_lock ->team->team_lock_key ->team->team_lock_key#2 ->team->team_lock_key#3 ->team->team_lock_key#4 ->team->team_lock_key#5 ->&hard_iface->bat_iv.ogm_buff_mutex ->ptype_lock ->team->team_lock_key#6 ->_xmit_NONE ->lock#9 ->&hsr->list_lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->mount_lock ->&xa->xa_lock#13 ->&dev_addr_list_lock_key#3/1 ->req_lock ->&x->wait#11 ->subsys mutex#81 ->bpf_devs_lock ->(work_completion)(&(&devlink_port->type_warn_dw)->work) ->&devlink_port->type_lock 
->&vn->sock_lock ->devnet_rename_sem ->&nft_net->commit_mutex ->&ent->pde_unload_lock ->&wg->device_update_lock ->_xmit_SIT ->&bridge_netdev_addr_lock_key/1 ->_xmit_TUNNEL ->_xmit_IPGRE ->_xmit_TUNNEL6 ->&dev_addr_list_lock_key/1 ->&dev_addr_list_lock_key#2/1 ->_xmit_ETHER/1 ->&nn->netlink_tap_lock ->&batadv_netdev_addr_lock_key/1 ->pcpu_alloc_mutex.wait_lock ->&vlan_netdev_addr_lock_key/1 ->&macvlan_netdev_addr_lock_key/1 ->&ipvlan->addrs_lock ->&macsec_netdev_addr_lock_key/1 ->key#20 ->&bat_priv->tt.commit_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#8 ->k-sk_lock-AF_INET ->k-slock-AF_INET ->k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->&ul->lock ->&lock->wait_lock ->&____s->seqcount#2 ->stock_lock ->__ip_vs_mutex ->netlbl_unlhsh_lock ->flowtable_lock ->nr_list_lock ->nr_neigh_list_lock ->&pn->all_ppp_mutex ->&ppp->rlock ->&ppp->wlock ->&hwstats->hwsdev_list_lock ->&net->xdp.lock ->mirred_list_lock ->&idev->mc_query_lock ->(work_completion)(&(&idev->mc_report_work)->work) ->&idev->mc_report_lock ->&pnn->pndevs.lock ->&pnn->routes.lock ->&dev_addr_list_lock_key#4 ->&pf->rwait ->dev_pm_qos_sysfs_mtx ->deferred_probe_mutex ->device_links_lock ->&rnp->exp_lock ->rcu_state.exp_mutex ->&caifn->caifdevs.lock ->&tun->lock ->wlock-AF_UNSPEC ->elock-AF_UNSPEC ->sk_lock-AF_INET6 ->slock-AF_INET6 ->&net->rules_mod_lock ->(&mrt->ipmr_expire_timer) ->(work_completion)(&ht->run_work) ->&ht->mutex ->nf_connlabels_lock ->qdisc_mod_lock ->&block->lock ->&block->cb_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->rcu_state.exp_mutex.wait_lock ->_xmit_ETHER/2 ->(work_completion)(&(&slave->notify_work)->work) ->(&hsr->prune_timer) ->(&hsr->announce_timer) ->&dentry->d_lock ->&fsnotify_mark_srcu ->&sb->s_type->i_lock_key#7 ->&s->s_inode_list_lock ->&xa->xa_lock#7 ->&chain->filter_chain_lock ->cls_mod_lock ->&block->proto_destroy_lock ->&bridge_netdev_addr_lock_key ->&bond->mode_lock ->&net->xfrm.xfrm_state_lock ->(&br->hello_timer) ->(&br->topology_change_timer) ->(&br->tcn_timer) ->(work_completion)(&(&br->gc_work)->work) ->(&brmctx->ip4_mc_router_timer) ->(&brmctx->ip4_other_query.timer) ->(&brmctx->ip4_own_query.timer) ->(&brmctx->ip6_mc_router_timer) ->(&brmctx->ip6_other_query.timer) ->(&brmctx->ip6_own_query.timer) ->&br->multicast_lock ->(work_completion)(&br->mcast_gc_work) ->rcu_state.barrier_mutex ->sk_lock-AF_INET ->slock-AF_INET ->raw_notifier_lock ->bcm_notifier_lock ->isotp_notifier_lock ->wq_mayday_lock ->mrt_lock ->&dev_addr_list_lock_key#3/2 ->(work_completion)(&port->wq) ->&app->lock#2 ->(&app->join_timer)#2 ->(&app->periodic_timer) ->&list->lock#15 ->(&app->join_timer) ->&app->lock ->&list->lock#14 ->&p->alloc_lock ->&list->lock#2 ->act_mod_lock ->&tn->idrinfo->lock ->&pmc->lock ->(&mp->timer) ->&macsec_netdev_addr_lock_key#2/2 ->dev->qdisc_tx_busylock ?: &qdisc_tx_busylock ->&net->xfrm.xfrm_policy_lock FD: 2 BD: 1 +.-.: drivers/char/random.c:1010 ->input_pool.lock FD: 51 BD: 220 +.+.: lock ->kernfs_idr_lock ->cgroup_idr_lock ->pidmap_lock ->drm_minor_lock ->&file_private->table_lock ->&q->queue_lock ->sg_index_lock ->map_idr_lock ->prog_idr_lock ->btf_idr_lock ->&group->inotify_data.idr_lock ->link_idr_lock ->sctp_assocs_id_lock FD: 13 BD: 230 +.+.: kernfs_idr_lock ->pool_lock#2 ->&c->lock ->&zone->lock ->&____s->seqcount ->&obj_hash[i].lock ->&n->list_lock ->&____s->seqcount#2 ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 102 BD: 218 ++++: &root->kernfs_rwsem ->&root->kernfs_iattr_rwsem ->kernfs_idr_lock ->&obj_hash[i].lock ->pool_lock#2 ->&rq->__lock 
->&cfs_rq->removed.lock ->&base->lock ->quarantine_lock ->inode_hash_lock ->fs_reclaim ->mmu_notifier_invalidate_range_start ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#24 ->&c->lock ->&____s->seqcount ->remove_cache_srcu ->batched_entropy_u8.lock ->kfence_freelist_lock ->&n->list_lock ->&sem->wait_lock ->&sb->s_type->i_lock_key#30 ->&sb->s_type->i_lock_key#31 ->kernfs_rename_lock ->&xa->xa_lock#3 ->stock_lock ->&____s->seqcount#2 ->rcu_node_0 ->&meta->lock ->&p->pi_lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&rcu_state.expedited_wq FD: 1 BD: 4 ++++: file_systems_lock FD: 78 BD: 222 ++++: &root->kernfs_iattr_rwsem ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->iattr_mutex ->&sem->wait_lock ->tk_core.seq.seqcount ->rcu_node_0 ->&rcu_state.expedited_wq ->pgd_lock ->stock_lock ->key ->pcpu_lock ->percpu_counters_lock ->pool_lock#2 FD: 5 BD: 51 +.+.: sb_lock ->unnamed_dev_ida.xa_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 116 BD: 1 +.+.: &type->s_umount_key/1 ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&obj_hash[i].lock ->percpu_counters_lock ->crngs.lock ->&sbinfo->stat_lock ->&____s->seqcount ->&c->lock ->&sb->s_type->i_lock_key ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->batched_entropy_u32.lock ->&dentry->d_lock ->fs_reclaim ->mmu_notifier_invalidate_range_start FD: 28 BD: 41 +.+.: list_lrus_mutex ->&rq->__lock FD: 1 BD: 52 ....: unnamed_dev_ida.xa_lock FD: 1 BD: 11 +.+.: &sbinfo->stat_lock FD: 55 BD: 157 +.+.: &sb->s_type->i_lock_key ->&dentry->d_lock ->&xa->xa_lock#7 FD: 1 BD: 343 +.+.: &s->s_inode_list_lock FD: 40 BD: 423 +.+.: &dentry->d_lock ->&wq ->&dentry->d_lock/1 ->&wq#2 ->&lru->node[i].lock ->sysctl_lock ->&wq#3 ->&dentry->d_lock/2 ->&p->pi_lock FD: 2 BD: 27 ....: mnt_id_ida.xa_lock ->pool_lock#2 FD: 44 BD: 144 +.+.: mount_lock ->mount_lock.seqcount ->&dentry->d_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 42 BD: 144 +.+.: mount_lock.seqcount ->&new_ns->poll ->&dentry->d_lock ->&obj_hash[i].lock ->pool_lock#2 ->&p->pi_lock FD: 114 BD: 1 +.+.: &type->s_umount_key#2/1 ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&____s->seqcount ->&c->lock ->&sb->s_type->i_lock_key#2 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 41 BD: 146 +.+.: &sb->s_type->i_lock_key#2 ->&dentry->d_lock FD: 1 BD: 2 ..-.: ucounts_lock FD: 42 BD: 159 +.+.: init_fs.lock ->init_fs.seq.seqcount ->&dentry->d_lock FD: 1 BD: 153 +.+.: init_fs.seq.seqcount FD: 113 BD: 1 +.+.: &type->s_umount_key#3/1 ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&____s->seqcount ->&c->lock ->&sb->s_type->i_lock_key#3 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 55 BD: 336 +.+.: &sb->s_type->i_lock_key#3 ->&dentry->d_lock ->&xa->xa_lock#7 FD: 1 BD: 109 +.+.: cpuhp_state-down FD: 211 BD: 109 +.+.: cpuhp_state-up ->smpboot_threads_lock ->sparse_irq_lock ->&swhash->hlist_mutex ->pmus_lock ->&x->wait#5 ->&obj_hash[i].lock ->hrtimer_bases.lock ->wq_pool_mutex ->rcu_node_0 ->&rq->__lock ->jump_label_mutex ->fs_reclaim ->&c->lock ->&n->list_lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&x->wait#9 ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#24 ->subsys mutex#25 ->&k->k_lock ->subsys mutex#78 ->&base->lock ->swap_slots_cache_mutex FD: 1 BD: 90 ++++: proc_subdir_lock 
FD: 114 BD: 1 +.+.: &type->s_umount_key#4/1 ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->&c->lock ->&zone->lock ->&____s->seqcount ->sb_lock ->&sb->s_type->i_lock_key#4 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 41 BD: 2 +.+.: &sb->s_type->i_lock_key#4 ->&dentry->d_lock FD: 32 BD: 111 ....: cgroup_file_kn_lock ->kernfs_notify_lock FD: 34 BD: 110 ....: css_set_lock ->cgroup_file_kn_lock ->&p->pi_lock ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock FD: 9 BD: 221 +...: cgroup_idr_lock ->pool_lock#2 ->&c->lock ->&zone->lock ->&____s->seqcount FD: 106 BD: 113 ++++: cpuset_rwsem ->cpuset_rwsem.rss.gp_wait.lock ->rcu_node_0 ->callback_lock ->&p->pi_lock ->&rq->__lock ->&obj_hash[i].lock ->jump_label_mutex ->&p->alloc_lock ->cpuset_attach_wq.lock ->&rnp->exp_lock ->rcu_state.exp_mutex FD: 3 BD: 114 ..-.: cpuset_rwsem.rss.gp_wait.lock ->&obj_hash[i].lock FD: 1 BD: 114 ....: callback_lock FD: 1 BD: 110 ....: cpuset_rwsem.waiters.lock FD: 82 BD: 16 +.+.: blkcg_pol_mutex ->pcpu_alloc_mutex ->fs_reclaim ->pool_lock#2 FD: 3 BD: 5089 ..-.: batched_entropy_u8.lock ->crngs.lock FD: 1 BD: 16 +.+.: &pgdat->memcg_lru.lock FD: 1 BD: 16 +.+.: devcgroup_mutex FD: 49 BD: 110 +.+.: freezer_mutex ->freezer_lock ->&rq->__lock ->rcu_node_0 ->freezer_mutex.wait_lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 1 +.+.: &pool->lock#2 FD: 266 BD: 2 +.+.: spec_ctrl_mutex ->cpu_hotplug_lock ->(console_sem).lock ->&rq->__lock FD: 48 BD: 130 +.+.: rcu_state.exp_mutex ->rcu_node_0 ->rcu_state.exp_wake_mutex ->&obj_hash[i].lock ->&pool->lock ->&rnp->exp_wq[2] ->&rq->__lock ->&rnp->exp_wq[3] ->&rnp->exp_wq[0] ->&rnp->exp_wq[1] ->rcu_state.exp_mutex.wait_lock ->&rcu_state.expedited_wq ->pool_lock#2 ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&cfs_rq->removed.lock ->stock_lock FD: 38 BD: 135 +.+.: rcu_state.exp_wake_mutex ->rcu_node_0 ->&rnp->exp_lock ->&rnp->exp_wq[0] ->&rnp->exp_wq[1] ->&rnp->exp_wq[2] ->&rnp->exp_wq[3] ->&rq->__lock ->rcu_state.exp_wake_mutex.wait_lock ->&pool->lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 137 +.+.: &rnp->exp_lock FD: 29 BD: 172 ....: &rnp->exp_wq[0] ->&p->pi_lock FD: 29 BD: 171 ....: &rnp->exp_wq[1] ->&p->pi_lock FD: 1 BD: 113 ....: init_sighand.siglock FD: 1 BD: 3 +.+.: init_files.file_lock FD: 13 BD: 233 ....: pidmap_lock ->pool_lock#2 ->&c->lock ->&zone->lock ->&____s->seqcount ->&obj_hash[i].lock ->&n->list_lock ->&____s->seqcount#2 ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 153 BD: 109 ++++: cgroup_threadgroup_rwsem ->css_set_lock ->&p->pi_lock ->tk_core.seq.seqcount ->tasklist_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->&sighand->siglock ->cgroup_threadgroup_rwsem.rss.gp_wait.lock ->rcu_node_0 ->inode_hash_lock ->fs_reclaim ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#30 ->&root->kernfs_iattr_rwsem ->&s->s_inode_list_lock ->&xa->xa_lock#7 ->&fsnotify_mark_srcu ->&c->lock ->cpuset_rwsem ->cpuset_rwsem.waiters.lock ->cpuset_rwsem.rss.gp_wait.lock ->&p->alloc_lock ->freezer_mutex ->&____s->seqcount#2 ->&____s->seqcount ->freezer_mutex.wait_lock ->&rcu_state.expedited_wq FD: 28 BD: 4870 -.-.: &p->pi_lock ->&rq->__lock ->&cfs_rq->removed.lock FD: 69 BD: 112 .+.+: tasklist_lock ->init_task.pi_lock ->init_sighand.siglock ->&p->pi_lock ->&sighand->siglock ->&pid->wait_pidfd ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->&p->alloc_lock ->stock_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 1 BD: 4963 -.-.: 
&per_cpu_ptr(group->pcpu, cpu)->seq FD: 1 BD: 1 ....: (kthreadd_done).wait.lock FD: 43 BD: 122 ....: &sighand->siglock ->&sig->wait_chldexit ->input_pool.lock ->&(&sig->stats_lock)->lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->hrtimer_bases.lock ->&p->pi_lock ->&obj_hash[i].lock ->&sighand->signalfd_wqh ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&tty->ctrl.lock ->&prev->lock ->&rq->__lock ->stock_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&n->list_lock ->&____s->seqcount#2 ->quarantine_lock FD: 50 BD: 157 +.+.: &p->alloc_lock ->&____s->seqcount#2 ->init_fs.lock ->&fs->lock ->&x->wait ->&memcg->mm_list.lock ->&x->wait#25 ->&newf->file_lock FD: 1 BD: 5050 .-.-: &____s->seqcount#2 FD: 76 BD: 4466 +.+.: fs_reclaim ->mmu_notifier_invalidate_range_start ->&mapping->i_mmap_rwsem ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->rcu_node_0 ->&rcu_state.expedited_wq ->pool_lock#2 ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock FD: 37 BD: 4488 +.+.: mmu_notifier_invalidate_range_start ->&rq->__lock ->dma_fence_map ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->pgd_lock ->stock_lock ->key ->pcpu_lock ->percpu_counters_lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 136 +.+.: kthread_create_lock FD: 29 BD: 179 ....: &x->wait ->&p->pi_lock FD: 37 BD: 114 +.+.: wq_pool_attach_mutex ->&p->pi_lock ->&x->wait#7 ->&pool->lock ->&stopper->lock ->&stop_pi_lock ->&rq->__lock ->wq_pool_attach_mutex.wait_lock ->&pool->lock/1 ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 29 BD: 4693 ..-.: wq_mayday_lock ->&p->pi_lock FD: 1 BD: 113 ....: &xa->xa_lock FD: 33 BD: 1 +.-.: (&pool->mayday_timer) ->&pool->lock/1 ->&obj_hash[i].lock ->&base->lock ->&pool->lock FD: 57 BD: 1 +.+.: (wq_completion)rcu_gp ->(work_completion)(&rnp->exp_poll_wq) ->(work_completion)(&(&ssp->srcu_sup->work)->work) ->(work_completion)(&sdp->work) ->(work_completion)(&rew->rew_work) ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 32 BD: 2 +.+.: (work_completion)(&rnp->exp_poll_wq) ->&rnp->exp_poll_lock FD: 14 BD: 1 +.-.: (&wq_watchdog_timer) ->&obj_hash[i].lock ->&base->lock FD: 396 BD: 1 +.+.: (wq_completion)events_unbound ->(work_completion)(&(&kfence_timer)->work) ->(work_completion)(&entry->work) ->(next_reseed).work ->(work_completion)(&sub_info->work) ->(stats_flush_dwork).work ->deferred_probe_work ->(work_completion)(&map->work) ->connector_reaper_work ->(reaper_work).work ->(work_completion)(&barr->work) ->(work_completion)(&port->bc_work) ->(work_completion)(&pool->idle_cull_work) ->&rq->__lock FD: 267 BD: 2 +.+.: (work_completion)(&(&kfence_timer)->work) ->cpu_hotplug_lock ->allocation_wait.lock ->&rq->__lock ->&obj_hash[i].lock ->&base->lock ->&cfs_rq->removed.lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq FD: 29 BD: 3 -.-.: allocation_wait.lock ->&p->pi_lock FD: 1 BD: 5091 ..-.: kfence_freelist_lock FD: 1 BD: 4543 ..-.: &meta->lock FD: 37 BD: 2 ....: rcu_tasks.cbs_gbl_lock ->(console_sem).lock ->rcu_tasks__percpu.cbs_pcpu_lock ->&ACCESS_PRIVATE(rtpcp, lock) FD: 3 BD: 3 ....: rcu_tasks__percpu.cbs_pcpu_lock ->&obj_hash[i].lock FD: 3 BD: 111 ....: &ACCESS_PRIVATE(rtpcp, lock) ->&obj_hash[i].lock FD: 5 BD: 2 ....: rcu_tasks_trace.cbs_gbl_lock ->rcu_tasks_trace__percpu.cbs_pcpu_lock ->&ACCESS_PRIVATE(rtpcp, lock) FD: 3 BD: 109 ....: rcu_tasks_trace__percpu.cbs_pcpu_lock ->&obj_hash[i].lock FD: 55 BD: 1 +.+.: rcu_tasks.tasks_gp_mutex ->rcu_tasks.cbs_gbl_lock ->&rq->__lock 
->rcu_tasks__percpu.cbs_pcpu_lock ->&obj_hash[i].lock ->&base->lock ->tasks_rcu_exit_srcu_srcu_usage.lock ->&ACCESS_PRIVATE(sdp, lock) ->tasks_rcu_exit_srcu ->&x->wait#3 ->kernel/rcu/tasks.h:147 ->(&timer.timer) ->&x->wait#2 ->(console_sem).lock FD: 29 BD: 3 ....: &x->wait#2 ->&p->pi_lock FD: 29 BD: 170 ....: &rnp->exp_wq[2] ->&p->pi_lock FD: 32 BD: 6 ....: tasks_rcu_exit_srcu_srcu_usage.lock ->&obj_hash[i].lock ->&ACCESS_PRIVATE(sdp, lock) FD: 1 BD: 2 ....: tasks_rcu_exit_srcu FD: 41 BD: 2 +.+.: (work_completion)(&(&ssp->srcu_sup->work)->work) ->&ssp->srcu_sup->srcu_gp_mutex ->tasks_rcu_exit_srcu_srcu_usage.lock ->&obj_hash[i].lock ->&base->lock ->&ssp->srcu_sup->srcu_cb_mutex ->remove_cache_srcu_srcu_usage.lock ->wakeup_srcu_srcu_usage.lock ->&ACCESS_PRIVATE(ssp->srcu_sup, lock) ->&rq->__lock ->tracepoint_srcu_srcu_usage.lock ->&cfs_rq->removed.lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq FD: 40 BD: 3 +.+.: &ssp->srcu_sup->srcu_gp_mutex ->tasks_rcu_exit_srcu_srcu_usage.lock ->&ssp->srcu_sup->srcu_cb_mutex ->remove_cache_srcu_srcu_usage.lock ->wakeup_srcu_srcu_usage.lock ->&ACCESS_PRIVATE(ssp->srcu_sup, lock) ->&rq->__lock ->tracepoint_srcu_srcu_usage.lock ->rcu_node_0 ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->&rcu_state.expedited_wq FD: 29 BD: 14 ....: &x->wait#3 ->&p->pi_lock FD: 269 BD: 1 +.+.: rcu_tasks_trace.tasks_gp_mutex ->rcu_tasks_trace.cbs_gbl_lock ->&rq->__lock ->rcu_tasks_trace__percpu.cbs_pcpu_lock ->cpu_hotplug_lock ->&x->wait#2 ->&obj_hash[i].lock ->&base->lock ->(&timer.timer) ->(console_sem).lock FD: 5 BD: 1 -.-.: (null) ->tk_core.seq.seqcount FD: 31 BD: 1 ..-.: &(&ssp->srcu_sup->work)->timer FD: 37 BD: 4 +.+.: &ssp->srcu_sup->srcu_cb_mutex ->tasks_rcu_exit_srcu_srcu_usage.lock ->remove_cache_srcu_srcu_usage.lock ->wakeup_srcu_srcu_usage.lock ->&ACCESS_PRIVATE(ssp->srcu_sup, lock) ->&rq->__lock ->tracepoint_srcu_srcu_usage.lock ->&obj_hash[i].lock ->&base->lock ->&cfs_rq->removed.lock ->pool_lock#2 FD: 33 BD: 2 +.+.: (work_completion)(&sdp->work) ->&ACCESS_PRIVATE(sdp, lock) ->&obj_hash[i].lock ->&x->wait#3 ->&rq->__lock ->&cfs_rq->removed.lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 2 ....: kernel/rcu/tasks.h:147 FD: 33 BD: 1 ..-.: &(&kfence_timer)->timer FD: 29 BD: 167 +.-.: (&timer.timer) ->&p->pi_lock FD: 29 BD: 152 ....: &rnp->exp_wq[3] ->&p->pi_lock FD: 1 BD: 1 ....: &nmi_desc[0].lock FD: 119 BD: 110 +.+.: smpboot_threads_lock ->fs_reclaim ->batched_entropy_u8.lock ->kfence_freelist_lock ->pool_lock#2 ->kthread_create_lock ->&p->pi_lock ->&x->wait ->&rq->__lock ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->cpuset_rwsem FD: 29 BD: 4370 ..-.: &rcu_state.gp_wq ->&p->pi_lock FD: 28 BD: 185 -.-.: &stop_pi_lock ->&rq->__lock FD: 1 BD: 185 -.-.: &stopper->lock FD: 1 BD: 2 +.+.: (module_notify_list).rwsem FD: 1 BD: 1 +.+.: ddebug_lock FD: 1 BD: 1 .+.+: &pmus_srcu FD: 266 BD: 1 +.+.: watchdog_mutex ->cpu_hotplug_lock FD: 29 BD: 109 ....: &x->wait#4 ->&p->pi_lock FD: 891 BD: 1 +.+.: (wq_completion)events ->(work_completion)(&sscs.work) ->pcpu_balance_work ->(work_completion)(&pwq->unbound_release_work) ->(shepherd).work ->(work_completion)(&rfkill_global_led_trigger_work) ->timer_update_work ->(work_completion)(&p->wq) ->(work_completion)(&(&group->avgs_work)->work) ->(work_completion)(&(&krcp->monitor_work)->work) ->(work_completion)(&helper->damage_work) ->(work_completion)(&rfkill->sync_work) ->(linkwatch_work).work ->(work_completion)(&w->work) ->(work_completion)(&vi->config_work) ->(debug_obj_work).work 
->(work_completion)(&gadget->work) ->kernfs_notify_work ->(work_completion)(&blkg->free_work) ->async_lookup_work ->autoload_work ->(work_completion)(&barr->work) ->drain_vmap_work ->netstamp_work ->reg_work ->(work_completion)(&fw_work->work) ->(delayed_fput_work).work ->(work_completion)(&s->destroy_work) ->(work_completion)(&(&ovs_net->masks_rebalance)->work) ->(work_completion)(&aux->work) ->(work_completion)(&ht->run_work) ->(work_completion)(&w->w) ->(work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) ->(deferred_probe_timeout_work).work ->(work_completion)(&w->work)#2 ->(regulator_init_complete_work).work ->(work_completion)(&cgrp->bpf.release_work) ->deferred_process_work ->(work_completion)(&data->fib_event_work) ->(work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) ->(work_completion)(&(&hwstats->traffic_dw)->work) ->&rq->__lock ->(work_completion)(&(&conn->info_timer)->work) ->(work_completion)(&rdev->wiphy_work) ->wireless_nlevent_work ->free_ipc_work ->fqdir_free_work ->(work_completion)(&nlk->work) ->(work_completion)(&smcibdev->port_event_work) ->(ima_keys_delayed_work).work ->(work_completion)(&vmpr->work) ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((&memcg_stock) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&memcg_stock))) *)((&memcg_stock)))); (typeof((typeof(*((&memcg_stock))) *)((&memcg_stock)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&msk->work) ->rcu_node_0 ->&rcu_state.expedited_wq FD: 31 BD: 2 +.+.: (work_completion)(&sscs.work) ->&x->wait#5 ->&obj_hash[i].lock ->hrtimer_bases.lock ->&x->wait#4 FD: 1 BD: 111 -.-.: &x->wait#5 FD: 1 BD: 1 ....: rcu_callback FD: 2 BD: 158 +.+.: &newf->file_lock ->&newf->resize_wait FD: 1 BD: 1 ....: &p->vtime.seqcount FD: 40 BD: 108 +.+.: mem_hotplug_lock ->mem_hotplug_lock.rss.gp_wait.lock FD: 3 BD: 109 ..-.: mem_hotplug_lock.rss.gp_wait.lock ->&obj_hash[i].lock FD: 1 BD: 108 ....: mem_hotplug_lock.waiters.lock FD: 269 BD: 1 +.+.: cpu_add_remove_lock ->cpu_hotplug_lock ->cpu_hotplug_lock.waiters.lock ->cpu_hotplug_lock.rss.gp_wait.lock ->spec_ctrl_mutex ->cpuset_hotplug_work FD: 3 BD: 108 ..-.: cpu_hotplug_lock.rss.gp_wait.lock ->&obj_hash[i].lock FD: 1 BD: 109 +.+.: pcp_batch_high_lock FD: 1 BD: 108 +.+.: relay_channels_mutex FD: 140 BD: 114 +.+.: sparse_irq_lock ->text_mutex ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&zone->lock ->&____s->seqcount ->init_mm.page_table_lock ->(console_sem).lock ->rtc_lock ->&rq->__lock ->&irq_desc_lock_class ->lock ->&root->kernfs_rwsem ->&c->lock ->pcpu_alloc_mutex ->&obj_hash[i].lock FD: 1 BD: 116 ....: rtc_lock FD: 1 BD: 4963 ....: &rq->__lock/1 FD: 29 BD: 109 ....: &x->wait#6 ->&p->pi_lock FD: 1 BD: 4963 -.-.: &cfs_rq->removed.lock FD: 1 BD: 115 ....: &x->wait#7 FD: 18 BD: 4963 -...: &rt_b->rt_runtime_lock ->&rt_rq->rt_runtime_lock ->tk_core.seq.seqcount ->hrtimer_bases.lock FD: 1 BD: 4964 -...: &rt_rq->rt_runtime_lock FD: 1 BD: 2 ....: cpu_hotplug_lock.waiters.lock FD: 1 BD: 2 +.+.: cpuset_hotplug_work FD: 32 BD: 108 +.+.: stop_cpus_mutex ->&stopper->lock ->&stop_pi_lock ->&rq->__lock ->&x->wait#8 FD: 29 BD: 110 ....: &x->wait#8 ->&p->pi_lock FD: 83 BD: 1 +.+.: sched_domains_mutex ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->pcpu_alloc_mutex ->&c->lock ->&n->list_lock ->&zone->lock ->&____s->seqcount ->pcpu_lock FD: 1 BD: 4963 ....: &cp->lock FD: 1 BD: 1 +.+.: (memory_chain).rwsem FD: 117 BD: 1 +.+.: &type->s_umount_key#5/1 
->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&obj_hash[i].lock ->percpu_counters_lock ->crngs.lock ->&sbinfo->stat_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#5 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->batched_entropy_u32.lock ->&dentry->d_lock FD: 55 BD: 147 +.+.: &sb->s_type->i_lock_key#5 ->&dentry->d_lock ->&xa->xa_lock#7 FD: 29 BD: 1 ....: (setup_done).wait.lock ->&p->pi_lock FD: 101 BD: 24 ++++: namespace_sem ->fs_reclaim ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->mnt_id_ida.xa_lock ->pcpu_alloc_mutex ->&dentry->d_lock ->mount_lock ->rename_lock ->&obj_hash[i].lock ->&new_ns->ns_lock ->stock_lock ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&rq->__lock ->&____s->seqcount#2 ->remove_cache_srcu ->namespace_sem.wait_lock ->&cfs_rq->removed.lock ->rcu_node_0 ->pcpu_alloc_mutex.wait_lock ->&p->pi_lock FD: 1 BD: 151 +.+.: &____s->seqcount#3 FD: 90 BD: 1 +.+.: &type->s_umount_key#6 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->&zone->lock ->&____s->seqcount ->&c->lock ->&lru->node[i].lock ->&sbinfo->stat_lock ->&obj_hash[i].lock FD: 29 BD: 425 +.+.: &lru->node[i].lock FD: 114 BD: 8 ++++: &sb->s_type->i_mutex_key ->namespace_sem ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->rename_lock.seqcount ->tomoyo_ss ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#2 ->&wb->list_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&rq->__lock ->&obj_hash[i].lock ->&dentry->d_lock/1 ->rcu_node_0 ->&cfs_rq->removed.lock ->&n->list_lock FD: 42 BD: 23 +.+.: rename_lock ->rename_lock.seqcount FD: 41 BD: 164 +.+.: rename_lock.seqcount ->&dentry->d_lock ->&dentry->d_lock/2 FD: 1 BD: 145 ....: &new_ns->poll FD: 2 BD: 426 +.+.: &____s->seqcount#4 ->&____s->seqcount#4/1 FD: 42 BD: 158 +.+.: &fs->lock ->&____s->seqcount#3 ->&dentry->d_lock FD: 1 BD: 142 +.+.: req_lock FD: 120 BD: 1 +.+.: of_mutex ->fs_reclaim ->pool_lock#2 ->lock ->&root->kernfs_rwsem FD: 1 BD: 178 ....: &x->wait#9 FD: 1 BD: 205 +.+.: &k->list_lock FD: 28 BD: 184 ++++: bus_type_sem ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 33 BD: 192 -...: &dev->power.lock ->&dev->power.lock/1 ->&dev->power.wait_queue FD: 35 BD: 180 +.+.: dpm_list_mtx ->(console_sem).lock ->&rq->__lock FD: 87 BD: 191 +.+.: uevent_sock_mutex ->fs_reclaim ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->nl_table_lock ->&obj_hash[i].lock ->nl_table_wait.lock ->&rq->__lock ->&cfs_rq->removed.lock ->rlock-AF_NETLINK ->rcu_node_0 ->&n->list_lock ->remove_cache_srcu ->quarantine_lock ->mmu_notifier_invalidate_range_start ->uevent_sock_mutex.wait_lock ->&base->lock ->&____s->seqcount#2 ->&rcu_state.expedited_wq ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->stock_lock FD: 1 BD: 178 ....: running_helpers_waitq.lock FD: 1 BD: 194 +.+.: sysfs_symlink_target_lock FD: 2 BD: 255 +.+.: &k->k_lock ->klist_remove_lock FD: 1 BD: 1 ....: &dev->mutex FD: 1 BD: 1 +.+.: subsys mutex FD: 2 BD: 1 +.+.: memory_blocks.xa_lock ->pool_lock#2 FD: 1 BD: 1 +.+.: subsys mutex#2 FD: 79 BD: 12 +.+.: register_lock ->proc_subdir_lock ->fs_reclaim ->pool_lock#2 ->proc_inum_ida.xa_lock ->&c->lock ->&zone->lock ->&____s->seqcount FD: 1 BD: 2 +.+.: (pm_chain_head).rwsem FD: 1 BD: 1 +.+.: cpufreq_governor_mutex FD: 43 BD: 2 +.+.: (work_completion)(&rew->rew_work) ->rcu_node_0 ->rcu_state.exp_wake_mutex ->&rcu_state.expedited_wq ->&obj_hash[i].lock ->&base->lock ->&pool->lock ->&rq->__lock 
->(&timer.timer) ->&cfs_rq->removed.lock ->pool_lock#2 ->pool_lock ->rcu_state.exp_wake_mutex.wait_lock ->&p->pi_lock FD: 1 BD: 1 +.+.: dyn_event_ops_mutex FD: 1 BD: 2 ++++: binfmt_lock FD: 1 BD: 93 +.+.: pin_fs_lock FD: 115 BD: 1 +.+.: &type->s_umount_key#7/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&____s->seqcount ->&c->lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#6 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 41 BD: 3 +.+.: &sb->s_type->i_lock_key#6 ->&dentry->d_lock FD: 92 BD: 1 +.+.: &sb->s_type->i_mutex_key#2 ->&sb->s_type->i_lock_key#6 ->rename_lock.seqcount ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&c->lock ->&____s->seqcount ->&zone->lock ->&obj_hash[i].lock FD: 29 BD: 424 ....: &wq ->&p->pi_lock FD: 1 BD: 36 +.+.: chrdevs_lock FD: 782 BD: 1 ++++: cb_lock ->genl_mutex ->fs_reclaim ->pool_lock#2 ->rlock-AF_NETLINK ->&c->lock ->&n->list_lock ->rtnl_mutex ->&obj_hash[i].lock ->&rdev->wiphy.mtx ->nlk_cb_mutex-GENERIC ->&____s->seqcount ->quarantine_lock ->remove_cache_srcu ->genl_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->rtnl_mutex.wait_lock ->rcu_node_0 ->&lock->wait_lock ->&____s->seqcount#2 ->&dir->lock#2 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&cfs_rq->removed.lock ->(console_sem).lock ->&devlink->lock_key ->&devlink->lock_key#4 FD: 763 BD: 3 +.+.: genl_mutex ->fs_reclaim ->pool_lock#2 ->nl_table_lock ->nl_table_wait.lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&obj_hash[i].lock ->rlock-AF_NETLINK ->&n->list_lock ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->genl_mutex.wait_lock ->hwsim_radio_lock ->&x->wait#9 ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&cfs_rq->removed.lock ->batched_entropy_u32.lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->subsys mutex#52 ->device_links_lock ->&k->k_lock ->deferred_probe_mutex ->cpu_hotplug_lock ->wq_pool_mutex ->crngs.lock ->triggers_list_lock ->leds_list_lock ->rfkill_global_mutex ->rfkill_global_mutex.wait_lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->(inetaddr_chain).rwsem ->inet6addr_chain.lock ->&____s->seqcount#2 ->quarantine_lock ->(console_sem).lock ->rcu_node_0 ->&rcu_state.expedited_wq ->batched_entropy_u8.lock ->kfence_freelist_lock ->hwsim_phys_lock ->key#26 ->&meta->lock ->remove_cache_srcu ->&pn->l2tp_tunnel_idr_lock ->stock_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#8 ->&dir->lock ->l2tp_ip6_lock ->k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->k-clock-AF_INET6 ->&xa->xa_lock#7 ->&fsnotify_mark_srcu ->&pernet->lock ->&rnp->exp_lock ->rcu_state.exp_mutex ->&ht->lock FD: 115 BD: 1 +.+.: &type->s_umount_key#8/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#7 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 41 BD: 86 +.+.: &sb->s_type->i_lock_key#7 ->&dentry->d_lock FD: 102 BD: 84 +.+.: &sb->s_type->i_mutex_key#3 ->&sb->s_type->i_lock_key#7 ->rename_lock.seqcount ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&c->lock ->&____s->seqcount ->&zone->lock ->&obj_hash[i].lock ->&rq->__lock ->&cfs_rq->removed.lock ->rcu_node_0 ->batched_entropy_u8.lock 
->kfence_freelist_lock ->&n->list_lock ->(console_sem).lock ->pin_fs_lock ->mount_lock ->&fsnotify_mark_srcu ->&xa->xa_lock#7 ->&____s->seqcount#2 ->remove_cache_srcu ->&xa->xa_lock#3 ->stock_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 1 BD: 4 +.+.: subsys mutex#3 FD: 4 BD: 6 ....: async_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 377 BD: 2 +.+.: (work_completion)(&entry->work) ->tk_core.seq.seqcount ->&dev->power.lock ->&k->list_lock ->sysfs_symlink_target_lock ->fs_reclaim ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->&zone->lock ->&____s->seqcount ->&obj_hash[i].lock ->&x->wait#9 ->&c->lock ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->async_lock ->async_done.lock ->&dentry->d_lock ->&sb->s_type->i_mutex_key ->sb_writers#2 ->quarantine_lock ->&pool->lock/1 ->cpu_hotplug_lock ->wq_pool_mutex ->pcpu_alloc_mutex ->batched_entropy_u32.lock ->&n->list_lock ->mmu_notifier_invalidate_range_start ->blk_queue_ida.xa_lock ->&q->sysfs_lock ->&set->tag_list_lock ->bio_slab_lock ->percpu_counters_lock ->&sb->s_type->i_lock_key#3 ->&s->s_inode_list_lock ->&xa->xa_lock#8 ->&q->mq_freeze_lock ->percpu_ref_switch_lock ->&q->queue_lock ->&rq->__lock ->major_names_lock ->floppy_lock ->rtc_lock ->&wq->mutex ->&desc->request_mutex ->register_lock ->&irq_desc_lock_class ->proc_subdir_lock ->proc_inum_ida.xa_lock ->resource_lock ->&base->lock ->(&timer.timer) ->command_done.lock ->&shost->scan_mutex ->(console_sem).lock ->async_scan_lock ->klist_remove_lock ->kernfs_idr_lock ->(&motor_off_timer[drive]) ->&xa->xa_lock#9 ->&q->unused_hctx_lock ->(&sq->pending_timer) ->(work_completion)(&td->dispatch_work) ->&q->blkcg_mutex ->pcpu_lock ->&xa->xa_lock#7 ->&fsnotify_mark_srcu FD: 1 BD: 20 .+.+: device_links_srcu FD: 1 BD: 3 +.+.: regulator_list_mutex FD: 30 BD: 19 +.+.: fwnode_link_lock ->&k->k_lock ->&rq->__lock FD: 31 BD: 104 +.+.: device_links_lock ->&k->list_lock ->&k->k_lock ->&rq->__lock FD: 1 BD: 4 ....: &dev->devres_lock FD: 1 BD: 4 +.+.: regulator_nesting_mutex FD: 2 BD: 1 +.+.: regulator_ww_class_mutex ->regulator_nesting_mutex FD: 124 BD: 154 +.+.: gdp_mutex ->&k->list_lock ->fs_reclaim ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->kobj_ns_type_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&rq->__lock ->&obj_hash[i].lock ->sysfs_symlink_target_lock ->gdp_mutex.wait_lock ->&sem->wait_lock ->&p->pi_lock ->kernfs_idr_lock FD: 3 BD: 3 +.+.: subsys mutex#4 ->&k->k_lock FD: 28 BD: 104 +.+.: deferred_probe_mutex ->&rq->__lock FD: 1 BD: 18 ....: probe_waitqueue.lock FD: 1 BD: 3 ....: async_done.lock FD: 115 BD: 1 +.+.: &type->s_umount_key#9/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&____s->seqcount ->&c->lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#8 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 42 BD: 155 +.+.: &sb->s_type->i_lock_key#8 ->&dentry->d_lock ->bit_wait_table + i FD: 85 BD: 70 +.+.: pack_mutex ->fs_reclaim ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->init_mm.page_table_lock ->vmap_purge_lock ->cpa_lock ->text_mutex FD: 1 BD: 69 +.+.: &fp->aux->used_maps_mutex FD: 1 BD: 1 +.+.: proto_list_mutex FD: 1 BD: 1 +.+.: targets_mutex FD: 30 BD: 4184 ...-: nl_table_lock ->pool_lock#2 ->&obj_hash[i].lock ->nl_table_wait.lock FD: 29 BD: 4185 ..-.: nl_table_wait.lock ->&p->pi_lock FD: 1 BD: 1 +.+.: net_family_lock FD: 2 BD: 5 ....: net_generic_ids.xa_lock ->pool_lock#2 FD: 4 BD: 96 ..-.: &dir->lock ->&obj_hash[i].lock ->pool_lock#2 FD: 36 BD: 
5 +.+.: k-sk_lock-AF_NETLINK ->k-slock-AF_NETLINK FD: 1 BD: 6 +...: k-slock-AF_NETLINK FD: 2 BD: 3955 ..-.: rhashtable_bucket ->rhashtable_bucket/1 FD: 1 BD: 9 ....: &list->lock FD: 29 BD: 9 ....: kauditd_wait.lock ->&p->pi_lock FD: 3 BD: 2 +.+.: lock#2 ->&zone->lock FD: 81 BD: 1 +.+.: khugepaged_mutex ->fs_reclaim ->pool_lock#2 ->kthread_create_lock ->&p->pi_lock ->&x->wait ->&rq->__lock ->&obj_hash[i].lock ->lock#2 ->pcp_batch_high_lock FD: 3 BD: 13 +.+.: subsys mutex#5 ->&k->k_lock FD: 4 BD: 1 +.+.: subsys mutex#6 ->&k->list_lock ->&k->k_lock FD: 1 BD: 1 +.+.: regmap_debugfs_early_lock FD: 1 BD: 1 +.+.: (acpi_reconfig_chain).rwsem FD: 1 BD: 1 +.+.: __i2c_board_lock FD: 80 BD: 1 +.+.: core_lock ->&k->list_lock ->&k->k_lock ->fs_reclaim ->pool_lock#2 FD: 2 BD: 1 +.+.: thermal_governor_lock ->thermal_list_lock FD: 1 BD: 2 +.+.: thermal_list_lock FD: 127 BD: 1 +.+.: cpuidle_lock ->&obj_hash[i].lock ->(console_sem).lock ->fs_reclaim ->pool_lock#2 ->&c->lock ->&zone->lock ->&____s->seqcount ->lock ->&root->kernfs_rwsem FD: 80 BD: 1 +.+.: k-sk_lock-AF_QIPCRTR ->k-slock-AF_QIPCRTR ->fs_reclaim ->qrtr_ports.xa_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->pool_lock#2 ->qrtr_node_lock ->&obj_hash[i].lock ->&meta->lock FD: 29 BD: 4513 -.-.: &rcu_state.expedited_wq ->&p->pi_lock FD: 1 BD: 2 +...: k-slock-AF_QIPCRTR FD: 1 BD: 2 +.+.: qrtr_ports.xa_lock FD: 1 BD: 2 +.+.: qrtr_node_lock FD: 44 BD: 111 ....: freezer_lock ->&sighand->siglock FD: 1 BD: 1 ....: printk_ratelimit_state.lock FD: 1 BD: 1 ....: audit_backlog_wait.lock FD: 79 BD: 122 ++++: (crypto_chain).rwsem ->fs_reclaim ->pool_lock#2 ->kthread_create_lock ->&p->pi_lock ->&x->wait ->&rq->__lock ->&obj_hash[i].lock ->&c->lock ->&zone->lock ->&____s->seqcount FD: 267 BD: 1 +.+.: iova_cache_mutex ->cpu_hotplug_lock ->slab_mutex FD: 3 BD: 1 +.+.: subsys mutex#7 ->&k->k_lock FD: 1 BD: 114 ....: pci_config_lock FD: 1 BD: 1 +.+.: subsys mutex#8 FD: 81 BD: 96 +.+.: dev_pm_qos_mtx ->fs_reclaim ->pool_lock#2 ->&dev->power.lock ->pm_qos_lock ->&c->lock ->&____s->seqcount ->&zone->lock ->&rq->__lock FD: 1 BD: 97 ....: pm_qos_lock FD: 125 BD: 95 +.+.: dev_pm_qos_sysfs_mtx ->dev_pm_qos_mtx ->&root->kernfs_rwsem ->fs_reclaim ->pool_lock#2 ->lock ->&zone->lock ->&____s->seqcount ->&obj_hash[i].lock ->&c->lock ->&rq->__lock ->&sem->wait_lock ->&p->pi_lock FD: 1 BD: 1 ..-.: uidhash_lock FD: 374 BD: 2 +.+.: &type->s_umount_key#28/1 ->fs_reclaim ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->&xa->xa_lock#7 ->lock#4 ->&mapping->private_lock ->tk_core.seq.seqcount ->&dd->lock ->&obj_hash[i].lock ->bit_wait_table + i ->&rq->__lock ->&sb->s_type->i_lock_key#3 ->lock#5 ->&lruvec->lru_lock ->&zone->lock ->crypto_alg_sem ->pool_lock#2 ->percpu_counters_lock ->inode_hash_lock ->&c->lock ->&sb->s_type->i_lock_key#22 ->&sb->s_type->i_mutex_key#8 ->proc_subdir_lock ->proc_inum_ida.xa_lock ->&journal->j_state_lock ->kthread_create_lock ->&p->pi_lock ->&x->wait ->&journal->j_wait_done_commit ->&sbi->s_error_lock ->&base->lock ->&fq->mq_flush_lock ->&p->alloc_lock ->cpu_hotplug_lock ->wq_pool_mutex ->&ei->i_es_lock ->ext4_grpinfo_slab_create_mutex ->&s->s_inode_list_lock ->ext4_li_mtx ->lock ->&root->kernfs_rwsem ->(console_sem).lock ->&dentry->d_lock FD: 97 BD: 1 +.+.: (work_completion)(&eval_map_work) ->trace_event_sem ->trace_event_sem.wait_lock ->&p->pi_lock ->&rq->__lock FD: 1 BD: 1 ....: oom_reaper_wait.lock FD: 1 BD: 1 +.+.: subsys mutex#9 FD: 1 BD: 1 ....: &pgdat->kcompactd_wait FD: 
82 BD: 2 +.+.: pcpu_balance_work ->pcpu_alloc_mutex ->pcpu_alloc_mutex.wait_lock ->&p->pi_lock ->&rq->__lock FD: 1 BD: 191 +.+.: pcpu_alloc_mutex.wait_lock FD: 149 BD: 1 +.+.: memory_tier_lock ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->&zone->lock ->&____s->seqcount ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&k->k_lock ->&c->lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#10 FD: 1 BD: 2 +.+.: subsys mutex#10 FD: 1 BD: 1 +.+.: ksm_thread_mutex FD: 1 BD: 1 ....: ksm_thread_wait.lock FD: 1 BD: 2 +.+.: damon_ops_lock FD: 80 BD: 121 ++++: crypto_alg_sem ->(crypto_chain).rwsem ->&rq->__lock FD: 44 BD: 1 +.+.: lock#3 ->&obj_hash[i].lock ->&rq->__lock ->(work_completion)(work) ->&x->wait#10 ->&cfs_rq->removed.lock FD: 1 BD: 98 +.+.: khugepaged_mm_lock FD: 29 BD: 98 ....: khugepaged_wait.lock ->&p->pi_lock FD: 103 BD: 2 +.+.: (work_completion)(&pwq->unbound_release_work) ->&wq->mutex ->wq_pool_mutex ->&obj_hash[i].lock ->pool_lock#2 ->&rq->__lock ->rcu_node_0 ->&pool->lock ->&rnp->exp_wq[0] ->&rnp->exp_wq[1] ->&rnp->exp_wq[3] ->&rnp->exp_lock ->rcu_state.exp_mutex ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->&rnp->exp_wq[2] ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq ->pool_lock FD: 128 BD: 5 +.+.: bio_slab_lock ->fs_reclaim ->pool_lock#2 ->slab_mutex ->bio_slabs.xa_lock ->&zone->lock ->&____s->seqcount ->&obj_hash[i].lock FD: 9 BD: 6 +.+.: bio_slabs.xa_lock ->pool_lock#2 ->&c->lock ->&zone->lock ->&____s->seqcount FD: 78 BD: 3 +.+.: major_names_lock ->fs_reclaim ->pool_lock#2 ->major_names_spinlock ->&c->lock ->&zone->lock ->&____s->seqcount FD: 1 BD: 4 +.+.: major_names_spinlock FD: 1 BD: 4488 ..-.: quarantine_lock FD: 38 BD: 4272 .+.+: remove_cache_srcu ->quarantine_lock ->&c->lock ->&n->list_lock ->pool_lock#2 ->&obj_hash[i].lock ->&____s->seqcount ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock ->&base->lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock FD: 3 BD: 13 +.+.: subsys mutex#11 ->&k->k_lock FD: 1 BD: 1 ....: *(&acpi_gbl_hardware_lock) FD: 35 BD: 1 ....: *(&acpi_gbl_gpe_lock) ->(console_sem).lock FD: 5 BD: 126 ....: mask_lock ->tmp_mask_lock FD: 4 BD: 127 -.-.: tmp_mask_lock ->vector_lock ->ioapic_lock FD: 1 BD: 1 -...: shrink_qlist.lock FD: 32 BD: 5 ....: remove_cache_srcu_srcu_usage.lock ->&obj_hash[i].lock ->&ACCESS_PRIVATE(sdp, lock) FD: 37 BD: 108 +.+.: flush_lock ->&obj_hash[i].lock ->(work_completion)(&sfw->work) ->&x->wait#10 ->&rq->__lock FD: 10 BD: 110 +.+.: (work_completion)(&sfw->work) ->&c->lock ->&obj_hash[i].lock ->&n->list_lock FD: 34 BD: 109 +.+.: (wq_completion)slub_flushwq ->(work_completion)(&sfw->work) ->(work_completion)(&barr->work) FD: 29 BD: 4569 ....: &x->wait#10 ->&p->pi_lock FD: 32 BD: 113 +.+.: (work_completion)(&barr->work) ->&x->wait#10 ->rcu_node_0 ->&rcu_state.expedited_wq ->&rq->__lock FD: 1 BD: 1 +.+.: system_transition_mutex FD: 1 BD: 1 +.+.: (power_off_prep_handler_list).rwsem FD: 1 BD: 1 ....: power_off_handler_list.lock FD: 1 BD: 1 +.+.: (restart_prep_handler_list).rwsem FD: 1 BD: 1 +.+.: (reboot_notifier_list).rwsem FD: 177 BD: 1 +.+.: acpi_scan_lock ->semaphore->lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&x->wait#9 ->&c->lock ->&zone->lock ->&____s->seqcount ->&n->list_lock ->acpi_device_lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&k->k_lock ->&dev->power.lock ->dpm_list_mtx ->subsys mutex#12 ->uevent_sock_mutex 
->running_helpers_waitq.lock ->*(&acpi_gbl_reference_count_lock) ->batched_entropy_u8.lock ->kfence_freelist_lock ->pci_config_lock ->&meta->lock ->quarantine_lock ->(console_sem).lock ->pci_bus_sem ->pci_mmcfg_lock ->resource_lock ->&device->physical_node_lock ->fwnode_link_lock ->devtree_lock ->gdp_mutex ->subsys mutex#13 ->pci_acpi_companion_lookup_sem ->pci_slot_mutex ->tk_core.seq.seqcount ->resource_alignment_lock ->device_links_srcu ->subsys mutex#14 ->acpi_pm_notifier_install_lock ->&rq->__lock ->pci_rescan_remove_lock ->subsys mutex#3 ->acpi_link_lock ->wakeup_ida.xa_lock ->subsys mutex#15 ->events_lock ->power_resource_list_lock FD: 79 BD: 2 +.+.: acpi_device_lock ->fs_reclaim ->pool_lock#2 ->&xa->xa_lock#2 ->semaphore->lock ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount FD: 1 BD: 3 ....: &xa->xa_lock#2 FD: 1 BD: 2 +.+.: subsys mutex#12 FD: 1 BD: 2 ++++: pci_bus_sem FD: 1 BD: 2 +.+.: pci_mmcfg_lock FD: 121 BD: 12 +.+.: &device->physical_node_lock ->sysfs_symlink_target_lock ->fs_reclaim ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->&c->lock ->&zone->lock ->&____s->seqcount FD: 3 BD: 2 +.+.: subsys mutex#13 ->&k->k_lock FD: 1 BD: 2 .+.+: pci_acpi_companion_lookup_sem FD: 1 BD: 2 +.+.: pci_slot_mutex FD: 1 BD: 2 +.+.: resource_alignment_lock FD: 1 BD: 193 ....: &dev->power.lock/1 FD: 1 BD: 2 +.+.: subsys mutex#14 FD: 146 BD: 2 +.+.: acpi_pm_notifier_install_lock ->semaphore->lock ->fs_reclaim ->pool_lock#2 ->*(&acpi_gbl_reference_count_lock) ->acpi_pm_notifier_lock FD: 143 BD: 3 +.+.: acpi_pm_notifier_lock ->fs_reclaim ->pool_lock#2 ->wakeup_ida.xa_lock ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&____s->seqcount ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#15 ->events_lock FD: 1 BD: 7 ....: wakeup_ida.xa_lock FD: 3 BD: 7 +.+.: subsys mutex#15 ->&k->k_lock FD: 1 BD: 7 ....: events_lock FD: 79 BD: 1 +.+.: &pgdat->kswapd_lock ->fs_reclaim ->pool_lock#2 ->kthread_create_lock ->&p->pi_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->&x->wait FD: 33 BD: 1 ..-.: drivers/char/random.c:251 FD: 15 BD: 2 +.+.: (next_reseed).work ->&obj_hash[i].lock ->&base->lock ->input_pool.lock ->base_crng.lock FD: 37 BD: 2 +.+.: pci_rescan_remove_lock FD: 86 BD: 2 +.+.: acpi_link_lock ->fs_reclaim ->pool_lock#2 ->semaphore->lock ->&obj_hash[i].lock ->*(&acpi_gbl_reference_count_lock) ->pci_config_lock ->&zone->lock ->&____s->seqcount ->(console_sem).lock ->&c->lock ->&rq->__lock FD: 1 BD: 2 +.+.: power_resource_list_lock FD: 155 BD: 7 ++++: &(&priv->bus_notifier)->rwsem ->fs_reclaim ->&c->lock ->&____s->seqcount ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->&zone->lock ->i2c_dev_list_lock ->&x->wait#9 ->&obj_hash[i].lock ->chrdevs_lock ->&k->list_lock ->gdp_mutex ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&rq->__lock ->&x->wait#11 ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#65 FD: 115 BD: 1 +.+.: &type->s_umount_key#10/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&zone->lock ->&____s->seqcount ->&c->lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#9 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 41 BD: 2 +.+.: &sb->s_type->i_lock_key#9 ->&dentry->d_lock FD: 115 BD: 1 +.+.: &type->s_umount_key#11/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem 
->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->&zone->lock ->&obj_hash[i].lock ->&sb->s_type->i_lock_key#10 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 41 BD: 2 +.+.: &sb->s_type->i_lock_key#10 ->&dentry->d_lock FD: 204 BD: 97 ++++: &mm->mmap_lock ->reservation_ww_class_acquire ->fs_reclaim ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&mm->page_table_lock ->ptlock_ptr(page) ->&anon_vma->rwsem ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->ptlock_ptr(page)#2 ->lock#4 ->lock#5 ->mmu_notifier_invalidate_range_start ->&vma->vm_lock->lock ->&obj_hash[i].lock ->&lruvec->lru_lock ->rcu_node_0 ->&rq->__lock ->quarantine_lock ->&cfs_rq->removed.lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&mapping->i_mmap_rwsem ->&rcu_state.expedited_wq ->resource_lock ->&p->alloc_lock ->tk_core.seq.seqcount ->&mm->mmap_lock/1 ->&n->list_lock ->&folio_wait_table[i] ->remove_cache_srcu ->&sem->wait_lock ->&p->pi_lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->khugepaged_mm_lock ->khugepaged_wait.lock ->&xa->xa_lock#7 ->&info->lock ->mount_lock ->&sb->s_type->i_lock_key ->&wb->list_lock ->&kcov->lock ->stock_lock ->&____s->seqcount#2 ->sb_pagefaults ->&mapping->private_lock ->&xa->xa_lock#3 ->&s->s_inode_list_lock ->batched_entropy_u32.lock ->&dd->lock ->&sb->s_type->i_mutex_key#21 ->&hugetlbfs_i_mmap_rwsem_key ->&hugetlb_fault_mutex_table[i] ->&lock->wait_lock ->hugetlb_lock ->&resv_map->lock ->(console_sem).lock ->console_owner_lock ->console_owner ->&p->lock#2 ->&xa->xa_lock#19 ->&ctrl->lock ->&tree->lock ->mutex ->&cache->free_lock ->&base->lock FD: 85 BD: 113 +.+.: reservation_ww_class_acquire ->reservation_ww_class_mutex FD: 84 BD: 114 +.+.: reservation_ww_class_mutex ->fs_reclaim ->&shmem->vmap_lock FD: 75 BD: 4470 ++++: &mapping->i_mmap_rwsem ->&obj_hash[i].lock ->pool_lock#2 ->&____s->seqcount ->&anon_vma->rwsem ->quarantine_lock ->&rq->__lock ->&meta->lock ->kfence_freelist_lock ->mmu_notifier_invalidate_range_start ->ptlock_ptr(page) ->rcu_node_0 ->&sem->wait_lock ->&cfs_rq->removed.lock ->&base->lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->ptlock_ptr(page)#2 ->lock#4 ->lock#5 ->&rcu_state.expedited_wq ->&p->pi_lock ->stock_lock FD: 1 BD: 4489 +.+.: dma_fence_map FD: 28 BD: 3 +.+.: delayed_uprobe_lock ->&rq->__lock FD: 1 BD: 4379 ....: key FD: 1 BD: 4 +.+.: attribute_container_mutex FD: 92 BD: 17 ++++: triggers_list_lock ->&led_cdev->trigger_lock FD: 92 BD: 17 ++++: leds_list_lock ->&led_cdev->trigger_lock FD: 162 BD: 2 ++++: (usb_notifier_list).rwsem ->fs_reclaim ->pool_lock#2 ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&rq->__lock ->&x->wait#11 ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#59 ->mon_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 31 BD: 1 ..-.: mm/vmstat.c:2014 FD: 266 BD: 2 +.+.: (shepherd).work ->cpu_hotplug_lock ->&obj_hash[i].lock ->&base->lock FD: 40 BD: 2 +.+.: (wq_completion)mm_percpu_wq ->(work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + 
(((__per_cpu_offset[(cpu)])))); }); }))->work) ->(work_completion)(work) ->(work_completion)(&barr->work) ->&rq->__lock FD: 29 BD: 3 +.+.: (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) ->&obj_hash[i].lock ->&base->lock ->&pcp->lock ->&rq->__lock ->&cfs_rq->removed.lock FD: 1 BD: 1 +.+.: rc_map_lock FD: 1 BD: 1 +.+.: subsys mutex#16 FD: 1 BD: 2 +.+.: &entry->access FD: 79 BD: 2 +.+.: info_mutex ->proc_subdir_lock ->fs_reclaim ->pool_lock#2 ->proc_inum_ida.xa_lock ->&c->lock ->&____s->seqcount FD: 1 BD: 155 +.+.: kobj_ns_type_lock FD: 30 BD: 68 +.+.: subsys mutex#17 ->&k->k_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 7 BD: 4062 ..-.: &dir->lock#2 ->&obj_hash[i].lock ->pool_lock#2 ->quarantine_lock ->&meta->lock ->kfence_freelist_lock FD: 36 BD: 72 +.+.: dev_hotplug_mutex ->&dev->power.lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->&k->k_lock FD: 1 BD: 73 ++++: dev_base_lock FD: 1 BD: 64 ++++: qdisc_mod_lock FD: 18 BD: 1 ++++: bt_proto_lock ->pool_lock#2 ->&dir->lock ->&obj_hash[i].lock ->chan_list_lock ->l2cap_sk_list.lock ->hci_sk_list.lock ->&c->lock ->&n->list_lock ->&____s->seqcount#2 ->sco_sk_list.lock ->&____s->seqcount FD: 86 BD: 21 +.+.: hci_cb_list_lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->chan_list_lock ->&conn->ident_lock ->&base->lock ->&list->lock#12 ->&conn->chan_lock ->&rq->__lock ->&c->lock ->&____s->seqcount ->&list->lock#13 ->(work_completion)(&(&conn->id_addr_timer)->work) ->(work_completion)(&(&conn->info_timer)->work) FD: 1 BD: 4 +.+.: mgmt_chan_list_lock FD: 1 BD: 3947 ....: &list->lock#2 FD: 77 BD: 66 +.+.: rate_ctrl_mutex ->fs_reclaim ->pool_lock#2 FD: 2 BD: 4 +.+.: netlbl_domhsh_lock ->pool_lock#2 FD: 1 BD: 65 +.+.: netlbl_unlhsh_lock FD: 165 BD: 1 +.+.: misc_mtx ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->&rq->__lock ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#18 ->misc_minors_ida.xa_lock ->&cfs_rq->removed.lock ->&base->lock ->&dir->lock ->rfkill_global_mutex ->&____s->seqcount#2 ->&n->list_lock ->remove_cache_srcu FD: 29 BD: 142 ....: &x->wait#11 ->&p->pi_lock FD: 124 BD: 1 .+.+: sb_writers ->mount_lock ->&type->i_mutex_dir_key/1 ->&sb->s_type->i_mutex_key#4 ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#5 ->&wb->list_lock ->&type->i_mutex_dir_key#2 ->&sem->wait_lock ->&p->pi_lock ->&rq->__lock ->&dentry->d_lock ->tomoyo_ss FD: 116 BD: 2 +.+.: &type->i_mutex_dir_key/1 ->rename_lock.seqcount ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->&obj_hash[i].lock ->&sbinfo->stat_lock ->&zone->lock ->&____s->seqcount ->&c->lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#5 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->batched_entropy_u32.lock ->&sb->s_type->i_mutex_key#4 ->quarantine_lock ->tomoyo_ss ->&u->bindlock ->&rq->__lock ->&fsnotify_mark_srcu ->&n->list_lock ->&sem->wait_lock ->krc.lock ->&xa->xa_lock#7 ->&sb->s_type->i_mutex_key#4/4 ->remove_cache_srcu ->&____s->seqcount#2 FD: 90 BD: 3 +.+.: 
&sb->s_type->i_mutex_key#4 ->tk_core.seq.seqcount ->tomoyo_ss ->&xattrs->lock ->&dentry->d_lock ->&fsnotify_mark_srcu ->&sb->s_type->i_mutex_key#4/4 ->&rq->__lock ->&sb->s_type->i_lock_key#5 ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->stock_lock ->&xa->xa_lock#7 ->lock#4 ->&info->lock ->key#9 ->rcu_node_0 ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->&wb->list_lock ->lock#5 ->&lruvec->lru_lock ->&sem->wait_lock ->&rcu_state.expedited_wq FD: 3 BD: 2 +.+.: subsys mutex#18 ->&k->k_lock FD: 167 BD: 6 +.+.: input_mutex ->&rq->__lock ->input_devices_poll_wait.lock ->fs_reclaim ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&dev->mutex#2 ->input_ida.xa_lock ->&x->wait#9 ->&obj_hash[i].lock ->chrdevs_lock ->&k->list_lock ->&zone->lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#30 ->&led_cdev->led_access ->&cfs_rq->removed.lock ->&mousedev->mutex/1 FD: 156 BD: 2 +.+.: (work_completion)(&rfkill_global_led_trigger_work) ->rfkill_global_mutex ->rfkill_global_mutex.wait_lock ->&p->pi_lock FD: 155 BD: 9 +.+.: rfkill_global_mutex ->fs_reclaim ->&____s->seqcount ->&zone->lock ->pool_lock#2 ->&obj_hash[i].lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&dev->power.lock ->dpm_list_mtx ->&rfkill->lock ->uevent_sock_mutex ->&n->list_lock ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#40 ->triggers_list_lock ->leds_list_lock ->&pool->lock ->&rq->__lock ->rfkill_global_mutex.wait_lock ->&cfs_rq->removed.lock ->uevent_sock_mutex.wait_lock ->&p->pi_lock ->&data->mtx ->&____s->seqcount#2 ->rcu_node_0 FD: 1 BD: 7 ....: input_devices_poll_wait.lock FD: 45 BD: 3 ++++: (netlink_chain).rwsem ->pool_lock#2 ->&obj_hash[i].lock ->&rq->__lock ->reg_indoor_lock ->hwsim_radio_lock ->&c->lock ->rcu_node_0 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&cfs_rq->removed.lock ->&n->list_lock ->&q->instances_lock ->&log->instances_lock ->&nft_net->commit_mutex ->&____s->seqcount#2 ->&____s->seqcount FD: 13 BD: 1 ++++: proto_tab_lock ->pool_lock#2 ->&dir->lock ->&obj_hash[i].lock ->&c->lock ->raw_sk_list.lock ->&n->list_lock ->&____s->seqcount#2 FD: 3 BD: 1 ....: random_ready_notifier.lock ->crngs.lock FD: 1 BD: 2 ....: misc_minors_ida.xa_lock FD: 36 BD: 1 ....: vga_lock#2 ->pci_config_lock ->(console_sem).lock FD: 115 BD: 1 +.+.: &type->s_umount_key#12/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#11 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 41 BD: 2 +.+.: &sb->s_type->i_lock_key#11 ->&dentry->d_lock FD: 130 BD: 1 +.+.: (work_completion)(&tracerfs_init_work) ->pin_fs_lock ->fs_reclaim ->pool_lock#2 ->&zone->lock ->&____s->seqcount ->&c->lock ->sb_lock ->&type->s_umount_key#13/1 ->&type->s_umount_key#14 ->mnt_id_ida.xa_lock ->pcpu_alloc_mutex ->&dentry->d_lock ->mount_lock ->&obj_hash[i].lock ->&fsnotify_mark_srcu ->&sb->s_type->i_mutex_key#5 ->event_mutex ->(module_notify_list).rwsem ->trace_types_lock FD: 117 BD: 2 +.+.: &type->s_umount_key#13/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#12 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock ->&type->s_umount_key#14 FD: 41 BD: 8 +.+.: &sb->s_type->i_lock_key#12 
->&dentry->d_lock FD: 108 BD: 3 +.+.: &type->s_umount_key#14 ->sb_lock ->list_lrus_mutex ->&xa->xa_lock#3 ->&obj_hash[i].lock ->pool_lock#2 ->shrinker_rwsem ->&rsp->gp_wait ->pcpu_lock ->fs_reclaim ->&dentry->d_lock ->&lru->node[i].lock ->&____s->seqcount FD: 11 BD: 4511 ....: &xa->xa_lock#3 ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount ->&n->list_lock FD: 92 BD: 6 +.+.: &sb->s_type->i_mutex_key#5 ->&sb->s_type->i_lock_key#12 ->rename_lock.seqcount ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&c->lock ->&zone->lock ->&____s->seqcount ->&obj_hash[i].lock ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 4 BD: 17 ..-.: &rsp->gp_wait ->&obj_hash[i].lock ->pool_lock#2 FD: 82 BD: 187 .+.+: &fsnotify_mark_srcu ->&conn->lock ->fs_reclaim ->pool_lock#2 ->&group->notification_lock ->&group->notification_waitq ->&____s->seqcount ->&c->lock ->&obj_hash[i].lock ->&rq->__lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&cfs_rq->removed.lock ->remove_cache_srcu ->stock_lock ->&____s->seqcount#2 ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->rcu_node_0 FD: 115 BD: 1 +.+.: &type->s_umount_key#15/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#13 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 98 BD: 2 +.+.: event_mutex ->pin_fs_lock ->&sb->s_type->i_mutex_key#5 ->trace_event_sem ->trace_event_sem.wait_lock ->&p->pi_lock ->trace_types_lock FD: 41 BD: 2 +.+.: &sb->s_type->i_lock_key#13 ->&dentry->d_lock FD: 115 BD: 1 +.+.: &type->s_umount_key#16/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->&c->lock ->&zone->lock ->&____s->seqcount ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#14 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 1 BD: 5 ....: trace_event_sem.wait_lock FD: 41 BD: 2 +.+.: &sb->s_type->i_lock_key#14 ->&dentry->d_lock FD: 267 BD: 2 +.+.: timer_update_work ->timer_keys_mutex FD: 266 BD: 3 +.+.: timer_keys_mutex ->cpu_hotplug_lock FD: 115 BD: 1 +.+.: &type->s_umount_key#17/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#15 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 41 BD: 2 +.+.: &sb->s_type->i_lock_key#15 ->&dentry->d_lock FD: 78 BD: 1 +.+.: kclist_lock ->resource_lock ->fs_reclaim ->pool_lock#2 FD: 115 BD: 1 +.+.: &type->s_umount_key#18/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&____s->seqcount ->&c->lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#16 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 41 BD: 146 +.+.: &sb->s_type->i_lock_key#16 ->&dentry->d_lock FD: 211 BD: 33 .+.+: tomoyo_ss ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->tomoyo_policy_lock ->(console_sem).lock ->console_owner_lock ->console_owner ->&c->lock ->&zone->lock ->&____s->seqcount ->&obj_hash[i].lock ->&dentry->d_lock ->tomoyo_log_lock ->tomoyo_log_wait.lock ->file_systems_lock ->fs_reclaim ->&mm->mmap_lock ->quarantine_lock ->&rq->__lock ->rcu_node_0 ->&n->list_lock ->remove_cache_srcu ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&base->lock ->tomoyo_policy_lock.wait_lock ->&p->pi_lock ->&cfs_rq->removed.lock ->pgd_lock 
->key ->pcpu_lock ->percpu_counters_lock ->mount_lock ->&____s->seqcount#2 ->&rcu_state.expedited_wq ->stock_lock FD: 115 BD: 1 +.+.: &type->s_umount_key#19/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#17 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 41 BD: 4 +.+.: &sb->s_type->i_lock_key#17 ->&dentry->d_lock FD: 94 BD: 1 +.+.: &ns->lock ->&dentry->d_lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#6 FD: 92 BD: 2 +.+.: &sb->s_type->i_mutex_key#6 ->&sb->s_type->i_lock_key#17 ->rename_lock.seqcount ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&c->lock ->&zone->lock ->&____s->seqcount FD: 43 BD: 1 +.+.: &type->s_umount_key#20 ->sb_lock ->&dentry->d_lock FD: 77 BD: 1 +.+.: pnp_lock ->fs_reclaim ->pool_lock#2 FD: 1 BD: 1 +.+.: subsys mutex#19 FD: 3 BD: 1 +.+.: subsys mutex#20 ->&k->k_lock FD: 3 BD: 10 +.+.: subsys mutex#21 ->&k->k_lock FD: 3 BD: 1 +.+.: subsys mutex#22 ->&k->k_lock FD: 338 BD: 1 +.+.: tty_mutex ->(console_sem).lock ->console_lock ->fs_reclaim ->pool_lock#2 ->tty_ldiscs_lock ->&obj_hash[i].lock ->&k->list_lock ->&k->k_lock ->&tty->legacy_mutex FD: 4 BD: 1 +.+.: subsys mutex#23 ->&k->list_lock ->&k->k_lock FD: 1 BD: 1 ....: netevent_notif_chain.lock FD: 183 BD: 11 ++++: clients_rwsem ->fs_reclaim ->clients.xa_lock ->&device->client_data_rwsem FD: 2 BD: 12 +.+.: clients.xa_lock ->pool_lock#2 FD: 769 BD: 10 ++++: devices_rwsem ->rcu_node_0 ->&rq->__lock ->fs_reclaim ->pool_lock#2 ->devices.xa_lock ->&obj_hash[i].lock ->(console_sem).lock ->console_owner_lock ->console_owner ->&pdata->netdev_lock ->&table->lock#4 ->&cfs_rq->removed.lock ->clients_rwsem ->rdma_nets_rwsem ->rdma_nets_rwsem.wait_lock ->&p->pi_lock FD: 1 BD: 1 +.+.: (blocking_lsm_notifier_chain).rwsem FD: 144 BD: 64 ++++: (inetaddr_chain).rwsem ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->fib_info_lock ->&c->lock ->&dir->lock#2 ->&____s->seqcount ->nl_table_lock ->&obj_hash[i].lock ->nl_table_wait.lock ->&net->sctp.local_addr_lock ->rlock-AF_NETLINK ->rcu_node_0 ->&rcu_state.expedited_wq ->&rq->__lock ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->remove_cache_srcu ->&ipvlan->addrs_lock ->quarantine_lock ->&____s->seqcount#2 ->krc.lock ->&tbl->lock ->class ->(&tbl->proxy_timer) ->&base->lock ->stock_lock FD: 1 BD: 5 ....: inet6addr_chain.lock FD: 1 BD: 1 +.+.: buses_mutex FD: 1 BD: 1 +.+.: offload_lock FD: 1 BD: 1 +...: inetsw_lock FD: 766 BD: 1 +.+.: (wq_completion)events_power_efficient ->(work_completion)(&(&tbl->managed_work)->work) ->(check_lifetime_work).work ->(work_completion)(&(&cache_cleaner)->work) ->(work_completion)(&(&ops->cursor_work)->work) ->(work_completion)(&(&hub->init_work)->work) ->(work_completion)(&(&gc_work->dwork)->work) ->(work_completion)(&(&tbl->gc_work)->work) ->(reg_check_chans).work ->(crda_timeout).work ->(gc_work).work ->&rq->__lock FD: 48 BD: 2 +.+.: (work_completion)(&(&tbl->managed_work)->work) ->&tbl->lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 47 BD: 3951 +.-.: &tbl->lock ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->batched_entropy_u32.lock ->&n->lock ->nl_table_lock ->nl_table_wait.lock ->&dir->lock#2 ->krc.lock ->&c->lock ->&____s->seqcount ->rlock-AF_NETLINK ->&n->list_lock ->&____s->seqcount#2 ->quarantine_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock 
->init_task.mems_allowed_seq.seqcount FD: 1 BD: 66 +.+.: ptype_lock FD: 32 BD: 2 +.+.: (check_lifetime_work).work ->&obj_hash[i].lock ->&base->lock ->rcu_node_0 ->&rq->__lock ->&cfs_rq->removed.lock FD: 1 BD: 64 +.+.: &net->rules_mod_lock FD: 1 BD: 1 +.+.: tcp_ulp_list_lock FD: 1 BD: 1 +...: xfrm_state_afinfo_lock FD: 1 BD: 1 +.+.: xfrm_policy_afinfo_lock FD: 1 BD: 1 +...: xfrm_input_afinfo_lock FD: 18 BD: 4408 ..-.: krc.lock ->&obj_hash[i].lock ->hrtimer_bases.lock ->&base->lock FD: 82 BD: 1 +.+.: (wq_completion)events_highpri ->(work_completion)(&(&krcp->page_cache_work)->work) ->(work_completion)(flush) ->(work_completion)(&barr->work) FD: 78 BD: 2 +.+.: (work_completion)(&(&krcp->page_cache_work)->work) ->fs_reclaim ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->krc.lock FD: 1 BD: 3 +.+.: &hashinfo->lock FD: 1 BD: 1 +.+.: tcp_cong_list_lock FD: 2 BD: 7 +.+.: cache_list_lock ->&cd->hash_lock FD: 30 BD: 2 +.+.: (work_completion)(&(&cache_cleaner)->work) ->cache_list_lock ->&obj_hash[i].lock ->&base->lock ->&rq->__lock FD: 1 BD: 1 +.+.: (rpc_pipefs_notifier_list).rwsem FD: 1 BD: 1 +.+.: svc_xprt_class_lock FD: 35 BD: 1 +.+.: xprt_list_lock ->(console_sem).lock FD: 1 BD: 1 ....: pcibios_fwaddrmap_lock FD: 160 BD: 3 ++++: umhelper_sem ->usermodehelper_disabled_waitq.lock ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->&k->k_lock ->subsys mutex#79 ->fw_lock ->uevent_sock_mutex ->running_helpers_waitq.lock ->&rq->__lock ->&x->wait#23 ->&base->lock ->&pool->lock ->(&timer.timer) ->dev_pm_qos_sysfs_mtx ->kernfs_idr_lock ->deferred_probe_mutex ->device_links_lock ->mmu_notifier_invalidate_range_start FD: 1 BD: 4 ....: usermodehelper_disabled_waitq.lock FD: 110 BD: 3 .+.+: sb_writers#2 ->mount_lock ->&sb->s_type->i_mutex_key/1 ->&sb->s_type->i_mutex_key FD: 104 BD: 4 +.+.: &sb->s_type->i_mutex_key/1 ->rename_lock.seqcount ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->&obj_hash[i].lock ->tomoyo_ss ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#2 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_mutex_key FD: 1 BD: 2 +.+.: tomoyo_log_lock FD: 1 BD: 2 ....: tomoyo_log_wait.lock FD: 67 BD: 144 +.+.: &wb->list_lock ->&sb->s_type->i_lock_key#2 ->&sb->s_type->i_lock_key#23 ->&sb->s_type->i_lock_key#22 ->&sb->s_type->i_lock_key ->&sb->s_type->i_lock_key#5 ->&sb->s_type->i_lock_key#8 ->&sb->s_type->i_lock_key#24 ->&sb->s_type->i_lock_key#3 ->&p->sequence ->key#10 ->&sb->s_type->i_lock_key#27 ->&sb->s_type->i_lock_key#16 FD: 1 BD: 1 +.+.: &drv->dynids.lock FD: 178 BD: 2 +.+.: (work_completion)(&sub_info->work) ->&sighand->siglock ->fs_reclaim ->pool_lock#2 ->&c->lock ->&zone->lock ->&____s->seqcount ->free_vmap_area_lock ->vmap_area_lock ->init_mm.page_table_lock ->batched_entropy_u64.lock ->&obj_hash[i].lock ->init_files.file_lock ->init_fs.lock ->&p->alloc_lock ->lock ->pidmap_lock ->cgroup_threadgroup_rwsem ->input_pool.lock ->&p->pi_lock ->&rq->__lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&sig->wait_chldexit ->tasklist_lock ->&prev->lock ->css_set_lock ->&x->wait#17 ->&cfs_rq->removed.lock ->rcu_node_0 ->&n->list_lock ->&____s->seqcount#2 ->remove_cache_srcu ->&rcu_state.expedited_wq FD: 14 BD: 1 +.-.: (&tcp_orphan_timer) ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 1 +.+.: umh_sysctl_lock FD: 74 BD: 4451 ++++: &anon_vma->rwsem ->&mm->page_table_lock ->&rq->__lock 
->&obj_hash[i].lock ->pool_lock#2 ->&meta->lock ->kfence_freelist_lock ->quarantine_lock ->rcu_node_0 ->&c->lock ->&____s->seqcount ->&n->list_lock ->&sem->wait_lock ->&cfs_rq->removed.lock ->&p->pi_lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->mmu_notifier_invalidate_range_start ->ptlock_ptr(page) ->ptlock_ptr(page)#2 ->stock_lock ->&____s->seqcount#2 ->&rcu_state.expedited_wq ->&rcu_state.gp_wq ->batched_entropy_u8.lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 1 BD: 4979 -.-.: per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 9 BD: 4485 +.+.: lock#4 ->&lruvec->lru_lock ->&obj_hash[i].lock ->lock#10 FD: 260 BD: 1 +.+.: &sig->cred_guard_mutex ->fs_reclaim ->pool_lock#2 ->&fs->lock ->&zone->lock ->&____s->seqcount ->&c->lock ->&dentry->d_lock ->&sb->s_type->i_mutex_key ->&obj_hash[i].lock ->delayed_uprobe_lock ->&mm->mmap_lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->rcu_node_0 ->&rq->__lock ->pool_lock ->&n->list_lock ->&cfs_rq->removed.lock ->quarantine_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&dentry->d_lock/1 ->&base->lock ->init_fs.lock ->&type->i_mutex_dir_key#3 ->&sb->s_type->i_lock_key#22 ->&sb->s_type->i_mutex_key#8 ->&p->pi_lock ->aa_buffers_lock ->mapping.invalidate_lock ->&folio_wait_table[i] ->tomoyo_ss ->&iint->mutex ->binfmt_lock ->entries_lock ->&ei->xattr_sem ->&tsk->futex_exit_mutex ->&sig->exec_update_lock ->&p->alloc_lock ->tk_core.seq.seqcount ->&stopper->lock ->&stop_pi_lock ->&x->wait#8 ->&lock->wait_lock ->key#5 ->remove_cache_srcu ->&meta->lock ->&____s->seqcount#2 ->&rcu_state.expedited_wq ->&sem->wait_lock FD: 2 BD: 4486 ..-.: &lruvec->lru_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 3 BD: 4479 +.+.: lock#5 ->&lruvec->lru_lock FD: 77 BD: 99 ++++: &vma->vm_lock->lock ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->ptlock_ptr(page)#2 ->mmu_notifier_invalidate_range_start ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&rq->__lock ->rcu_node_0 ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->ptlock_ptr(page) ->&rcu_state.gp_wq ->&lruvec->lru_lock ->&rcu_state.expedited_wq ->remove_cache_srcu ->&c->lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&p->pi_lock ->&n->list_lock ->stock_lock FD: 205 BD: 2 +.+.: &tsk->futex_exit_mutex ->&p->pi_lock ->&rq->__lock ->&mm->mmap_lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq FD: 31 BD: 1 +.+.: &child->perf_event_mutex ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 113 ....: &pid->wait_pidfd FD: 29 BD: 123 ....: &sig->wait_chldexit ->&p->pi_lock FD: 15 BD: 123 ....: &(&sig->stats_lock)->lock ->&____s->seqcount#5 FD: 14 BD: 124 ....: &____s->seqcount#5 ->pidmap_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 35 BD: 1 +.+.: low_water_lock ->(console_sem).lock ->console_owner_lock ->console_owner FD: 273 BD: 1 +.+.: vendor_module_lock ->slab_mutex ->pcpu_alloc_mutex ->pool_lock#2 ->&obj_hash[i].lock ->percpu_counters_lock ->fs_reclaim ->shrinker_rwsem ->&zone->lock ->&____s->seqcount ->cpu_hotplug_lock ->timekeeper_lock FD: 31 BD: 1 ..-.: &(&cache_cleaner)->timer FD: 1 BD: 5008 -.-.: pvclock_gtod_data FD: 118 BD: 2 ++++: &type->i_mutex_dir_key#2 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->rename_lock.seqcount ->&c->lock ->&zone->lock ->&____s->seqcount ->namespace_sem ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#5 ->&sem->wait_lock ->&rq->__lock ->&n->list_lock ->remove_cache_srcu ->&xa->xa_lock#3 
->&obj_hash[i].lock ->stock_lock ->tomoyo_ss ->&sbinfo->stat_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->batched_entropy_u32.lock FD: 3 BD: 110 +.+.: subsys mutex#24 ->&k->k_lock FD: 3 BD: 110 +.+.: subsys mutex#25 ->&k->k_lock FD: 1 BD: 1 +.+.: subsys mutex#26 FD: 167 BD: 1 +.+.: subsys mutex#27 ->&k->list_lock ->&k->k_lock ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->platform_devid_ida.xa_lock ->&____s->seqcount ->lock ->&root->kernfs_rwsem ->bus_type_sem ->&c->lock ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->&(&priv->bus_notifier)->rwsem ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#3 ->wakeup_ida.xa_lock ->gdp_mutex ->subsys mutex#15 ->events_lock ->rtcdev_lock FD: 1 BD: 1 +.+.: subsys mutex#28 FD: 38 BD: 2 +.+.: (work_completion)(&p->wq) ->vmap_area_lock ->&obj_hash[i].lock ->purge_vmap_area_lock ->pool_lock#2 ->&meta->lock ->kfence_freelist_lock ->rcu_node_0 ->&rq->__lock ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq ->quarantine_lock ->&base->lock FD: 31 BD: 1 ..-.: &(&group->avgs_work)->timer FD: 31 BD: 1 ..-.: &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->timer FD: 33 BD: 1 ..-.: mm/memcontrol.c:589 FD: 29 BD: 2 +.+.: (work_completion)(&(&group->avgs_work)->work) ->&group->avgs_lock ->&rq->__lock FD: 28 BD: 3 +.+.: &group->avgs_lock ->&per_cpu_ptr(group->pcpu, cpu)->seq ->&obj_hash[i].lock ->&base->lock ->&rq->__lock FD: 32 BD: 2 +.+.: (stats_flush_dwork).work ->cgroup_rstat_lock ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->&cfs_rq->removed.lock ->rcu_node_0 ->&rcu_state.expedited_wq ->pool_lock#2 FD: 2 BD: 18 ....: cgroup_rstat_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 1 BD: 111 +.+.: subsys mutex#29 FD: 1 BD: 4 +.+.: key_user_lock FD: 1 BD: 4 +.+.: key_serial_lock FD: 5 BD: 5 +.+.: key_construction_mutex ->&obj_hash[i].lock ->pool_lock#2 ->keyring_name_lock FD: 84 BD: 3 +.+.: &type->lock_class ->keyring_serialise_link_lock ->fs_reclaim ->pool_lock#2 ->key_user_lock ->crngs.lock ->key_serial_lock ->key_construction_mutex ->ima_keys_lock ->&c->lock ->&____s->seqcount FD: 80 BD: 4 +.+.: keyring_serialise_link_lock ->fs_reclaim ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->root_key_user.lock ->key_construction_mutex ->&zone->lock FD: 1 BD: 1 ....: &pgdat->kswapd_wait FD: 1 BD: 1 +.+.: drivers_lock FD: 106 BD: 1 +.+.: damon_dbgfs_lock ->fs_reclaim ->&c->lock ->&____s->seqcount ->pool_lock#2 ->tk_core.seq.seqcount ->damon_ops_lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 FD: 115 BD: 1 +.+.: &type->s_umount_key#21/1 ->&rq->__lock ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&c->lock ->&____s->seqcount ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#18 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 41 BD: 2 +.+.: &sb->s_type->i_lock_key#18 ->&dentry->d_lock FD: 1 BD: 1 +.+.: dq_list_lock FD: 115 BD: 1 +.+.: &type->s_umount_key#22/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#19 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 41 BD: 7 +.+.: &sb->s_type->i_lock_key#19 ->&dentry->d_lock 
FD: 1 BD: 1 +.+.: configfs_subsystem_mutex FD: 99 BD: 1 +.+.: &sb->s_type->i_mutex_key#7/1 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->&zone->lock ->&____s->seqcount ->&c->lock ->configfs_dirent_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#19 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&default_group_class[depth - 1]/2 ->&default_group_class[depth - 1]#2 FD: 1 BD: 8 +.+.: configfs_dirent_lock FD: 97 BD: 2 +.+.: &default_group_class[depth - 1]/2 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->configfs_dirent_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#19 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&default_group_class[depth - 1]#3/2 ->&c->lock ->&zone->lock ->&____s->seqcount FD: 77 BD: 1 +.+.: ecryptfs_daemon_hash_mux ->fs_reclaim ->pool_lock#2 FD: 1 BD: 1 ....: &ecryptfs_kthread_ctl.wait FD: 2 BD: 1 +.+.: ecryptfs_msg_ctx_lists_mux ->&ecryptfs_msg_ctx_arr[i].mux FD: 1 BD: 2 +.+.: &ecryptfs_msg_ctx_arr[i].mux FD: 1 BD: 1 +.+.: nfs_version_lock FD: 98 BD: 1 ++++: key_types_sem ->(console_sem).lock ->&rq->__lock ->asymmetric_key_parsers_sem ->&type->lock_class ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 1 +.+.: pnfs_spinlock FD: 1 BD: 5 +.+.: &sn->pipefs_sb_lock FD: 1 BD: 1 +.+.: nls_lock FD: 1 BD: 1 +.+.: jffs2_compressor_list_lock FD: 1 BD: 1 +.+.: next_tag_value_lock FD: 1 BD: 1 ....: log_redrive_lock FD: 2 BD: 1 ....: &TxAnchor.LazyLock ->jfs_commit_thread_wait.lock FD: 1 BD: 2 ....: jfs_commit_thread_wait.lock FD: 1 BD: 1 +.+.: jfsTxnLock FD: 35 BD: 1 +.+.: ocfs2_stack_lock ->(console_sem).lock FD: 1 BD: 1 +.+.: o2hb_callback_sem FD: 1 BD: 1 +.+.: o2net_handler_lock FD: 115 BD: 1 +.+.: &type->s_umount_key#23/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&____s->seqcount ->&c->lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#20 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock ->&____s->seqcount#2 ->&xa->xa_lock#3 ->&obj_hash[i].lock ->stock_lock ->&rq->__lock ->&n->list_lock ->pcpu_alloc_mutex.wait_lock ->&p->pi_lock FD: 41 BD: 5 +.+.: &sb->s_type->i_lock_key#20 ->&dentry->d_lock FD: 265 BD: 72 +.+.: nf_hook_mutex ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&zone->lock ->&obj_hash[i].lock ->&rq->__lock ->nf_hook_mutex.wait_lock ->stock_lock ->&____s->seqcount#2 ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->rcu_node_0 ->cpu_hotplug_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->remove_cache_srcu ->&rcu_state.expedited_wq FD: 77 BD: 1 ++++: alg_types_sem ->fs_reclaim ->pool_lock#2 FD: 1 BD: 1 +.+.: dma_list_mutex FD: 89 BD: 2 ++++: asymmetric_key_parsers_sem ->(console_sem).lock ->fs_reclaim ->pool_lock#2 ->crypto_alg_sem ->&obj_hash[i].lock ->(crypto_chain).rwsem ->&x->wait#21 ->&base->lock ->&rq->__lock ->(&timer.timer) ->&c->lock ->&____s->seqcount ->&zone->lock FD: 755 BD: 1 +.+.: blkcg_pol_register_mutex ->blkcg_pol_mutex ->cgroup_mutex FD: 1 BD: 4 +.+.: elv_list_lock FD: 81 BD: 1 +.+.: crc_t10dif_mutex ->crypto_alg_sem ->fs_reclaim ->pool_lock#2 FD: 81 BD: 1 +.+.: crc64_rocksoft_mutex ->crypto_alg_sem ->fs_reclaim ->pool_lock#2 FD: 1 BD: 1 +.+.: ts_mod_lock FD: 30 BD: 424 +.+.: &dentry->d_lock/1 ->&lru->node[i].lock FD: 3 BD: 7 +.+.: subsys mutex#30 ->&k->k_lock FD: 36 BD: 10 +.+.: &dev->mutex#2 ->&obj_hash[i].lock FD: 32 BD: 5 ....: wakeup_srcu_srcu_usage.lock ->&obj_hash[i].lock ->&ACCESS_PRIVATE(sdp, lock) FD: 1 BD: 3 ....: wakeup_srcu FD: 1 BD: 3 ....: (&ws->timer) FD: 1 BD: 256 +.+.: klist_remove_lock FD: 6 BD: 
4181 ....: &ws->lock ->tk_core.seq.seqcount ->&obj_hash[i].lock FD: 1 BD: 3 ....: deleted_ws.lock FD: 134 BD: 1 +.+.: register_count_mutex ->&k->list_lock ->fs_reclaim ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->&k->k_lock ->uevent_sock_mutex ->&obj_hash[i].lock ->running_helpers_waitq.lock ->&rq->__lock FD: 1 BD: 1 +.+.: (cpufreq_policy_notifier_list).rwsem FD: 1 BD: 1 +.+.: cpuidle_driver_lock FD: 1 BD: 1 ....: thermal_cdev_ida.xa_lock FD: 1 BD: 3 ....: cpufreq_driver_lock FD: 3 BD: 1 +.+.: subsys mutex#31 ->&k->k_lock FD: 1 BD: 1 +.+.: (x86_mce_decoder_chain).rwsem FD: 1 BD: 1 ....: virtio_index_ida.xa_lock FD: 1 BD: 1 +.+.: subsys mutex#32 FD: 145 BD: 108 +.+.: &md->mutex ->fs_reclaim ->pool_lock#2 ->irq_domain_mutex ->pci_config_lock ->&xa->xa_lock#4 ->&domain->mutex ->&irq_desc_lock_class ->vector_lock ->&root->kernfs_rwsem ->lock ->&c->lock ->&____s->seqcount ->&zone->lock FD: 2 BD: 109 +.+.: &xa->xa_lock#4 ->pool_lock#2 FD: 1 BD: 1 +.+.: &dev->vqs_list_lock FD: 1 BD: 1 ....: &vp_dev->lock FD: 1 BD: 1 +.+.: (oom_notify_list).rwsem FD: 1 BD: 1 ....: &dev->config_lock FD: 1 BD: 1 +.+.: vdpa_dev_lock FD: 3 BD: 1 +.+.: subsys mutex#33 ->&k->k_lock FD: 234 BD: 1 +.+.: serial_mutex ->gpio_lookup_lock ->port_mutex FD: 1 BD: 2 +.+.: gpio_lookup_lock FD: 232 BD: 2 +.+.: port_mutex ->&port->mutex FD: 231 BD: 9 +.+.: &port->mutex ->fs_reclaim ->pool_lock#2 ->console_mutex ->resource_lock ->&port_lock_key ->(console_sem).lock ->ctrl_ida.xa_lock ->&x->wait#9 ->&obj_hash[i].lock ->&dev->power.lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&k->k_lock ->dpm_list_mtx ->&c->lock ->&____s->seqcount ->uevent_sock_mutex ->running_helpers_waitq.lock ->&zone->lock ->subsys mutex#34 ->semaphore->lock ->*(&acpi_gbl_reference_count_lock) ->dev_pm_qos_sysfs_mtx ->kernfs_idr_lock ->deferred_probe_mutex ->device_links_lock ->mmu_notifier_invalidate_range_start ->gdp_mutex ->req_lock ->&p->pi_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&x->wait#11 ->subsys mutex#21 ->chrdevs_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->hash_mutex ->&i->lock ->&desc->request_mutex ->register_lock ->&irq_desc_lock_class ->proc_subdir_lock ->proc_inum_ida.xa_lock FD: 1 BD: 10 ....: ctrl_ida.xa_lock FD: 1 BD: 10 +.+.: subsys mutex#34 FD: 1 BD: 1 ....: rng_index_ida.xa_lock FD: 80 BD: 1 +.+.: rng_mutex ->&x->wait#13 ->fs_reclaim ->pool_lock#2 ->kthread_create_lock ->&p->pi_lock ->&rq->__lock ->&x->wait ->&obj_hash[i].lock FD: 29 BD: 2 -.-.: &x->wait#12 ->&p->pi_lock FD: 1 BD: 2 ....: &x->wait#13 FD: 31 BD: 1 +.+.: reading_mutex ->reading_mutex.wait_lock ->&rq->__lock ->&x->wait#12 FD: 1 BD: 2 +.+.: reading_mutex.wait_lock FD: 1 BD: 1 ....: &dev->managed.lock FD: 115 BD: 1 +.+.: &type->s_umount_key#24/1 ->fs_reclaim ->&c->lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#21 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&obj_hash[i].lock ->&dentry->d_lock FD: 41 BD: 2 +.+.: &sb->s_type->i_lock_key#21 ->&dentry->d_lock FD: 2 BD: 221 ....: drm_minor_lock ->pool_lock#2 FD: 1 BD: 1 +.+.: &dev->debugfs_mutex FD: 3 BD: 3 +.+.: subsys mutex#35 ->&k->k_lock FD: 1 BD: 1 ....: (worker)->lock FD: 77 BD: 23 +.+.: &dev->mode_config.idr_mutex ->fs_reclaim ->pool_lock#2 FD: 102 BD: 19 +.+.: crtc_ww_class_acquire ->crtc_ww_class_mutex ->fs_reclaim ->pool_lock#2 FD: 101 BD: 20 +.+.: crtc_ww_class_mutex ->reservation_ww_class_acquire ->fs_reclaim 
->pool_lock#2 ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->&dev->mode_config.idr_mutex ->&dev->mode_config.blob_lock ->&zone->lock ->&crtc->commit_lock ->reservation_ww_class_mutex ->tk_core.seq.seqcount ->&vkms_out->lock ->&dev->vbl_lock ->&x->wait#14 ->(work_completion)(&vkms_state->composer_work) ->&base->lock ->&rq->__lock ->(&timer.timer) ->(work_completion)(&vkms_state->composer_work)#2 FD: 1 BD: 21 +.+.: &dev->mode_config.blob_lock FD: 1 BD: 1 ....: &xa->xa_lock#5 FD: 1 BD: 1 ....: &xa->xa_lock#6 FD: 1 BD: 22 ....: &dev->mode_config.connector_list_lock FD: 20 BD: 24 ..-.: &dev->vbl_lock ->&dev->vblank_time_lock FD: 160 BD: 1 .+.+: drm_connector_list_iter ->&dev->mode_config.connector_list_lock ->fs_reclaim ->pool_lock#2 ->&connector->mutex FD: 158 BD: 2 +.+.: &connector->mutex ->fs_reclaim ->&c->lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#35 ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->&dev->mode_config.idr_mutex ->connector_list_lock FD: 1 BD: 3 +.+.: connector_list_lock FD: 31 BD: 1 ..-.: &(&krcp->monitor_work)->timer FD: 31 BD: 1 ..-.: &(&tbl->managed_work)->timer FD: 35 BD: 2 +.+.: (work_completion)(&(&krcp->monitor_work)->work) ->krc.lock ->&obj_hash[i].lock ->&rq->__lock ->rcu_node_0 FD: 1 BD: 1 +.+.: &dev->filelist_mutex FD: 146 BD: 15 +.+.: &helper->lock ->fs_reclaim ->pool_lock#2 ->&client->modeset_mutex ->&obj_hash[i].lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->batched_entropy_u32.lock ->&mgr->vm_lock ->&dev->object_name_lock ->&node->vm_lock ->&file_private->table_lock ->&dev->mode_config.idr_mutex ->&dev->mode_config.fb_lock ->&file->fbs_lock ->&prime_fpriv->lock ->free_vmap_area_lock ->vmap_area_lock ->&zone->lock ->&____s->seqcount ->init_mm.page_table_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&c->lock ->&dev->master_mutex ->&lock->wait_lock ->&pool->lock ->reservation_ww_class_mutex FD: 104 BD: 17 +.+.: &client->modeset_mutex ->&dev->mode_config.mutex ->fs_reclaim ->pool_lock#2 ->crtc_ww_class_acquire FD: 103 BD: 18 +.+.: &dev->mode_config.mutex ->crtc_ww_class_acquire ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock FD: 2 BD: 16 +.+.: &mgr->vm_lock ->pool_lock#2 FD: 52 BD: 16 +.+.: &dev->object_name_lock ->lock FD: 4 BD: 221 +.+.: &file_private->table_lock ->pool_lock#2 ->&obj_hash[i].lock FD: 4 BD: 16 +.+.: &node->vm_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 16 +.+.: &dev->mode_config.fb_lock FD: 1 BD: 16 +.+.: &file->fbs_lock FD: 1 BD: 16 +.+.: &prime_fpriv->lock FD: 202 BD: 1 +.+.: registration_lock ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&rq->__lock ->&x->wait#11 ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#11 ->vt_switch_mutex ->(console_sem).lock ->console_lock FD: 77 BD: 2 +.+.: vt_switch_mutex ->fs_reclaim ->pool_lock#2 FD: 1 BD: 13 +.+.: &fb_info->lock FD: 105 BD: 16 +.+.: &dev->master_mutex ->&client->modeset_mutex FD: 1 BD: 21 +.+.: &crtc->commit_lock FD: 83 BD: 115 +.+.: &shmem->vmap_lock ->&shmem->pages_lock ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock 
->&____s->seqcount ->init_mm.page_table_lock ->&zone->lock FD: 79 BD: 116 +.+.: &shmem->pages_lock ->fs_reclaim ->pool_lock#2 ->&zone->lock ->&____s->seqcount ->&xa->xa_lock#7 ->lock#4 ->&info->lock ->&rq->__lock FD: 42 BD: 4507 ..-.: &xa->xa_lock#7 ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&zone->lock ->&____s->seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock ->key#10 ->&s->s_inode_wblist_lock ->&base->lock ->key#12 ->&wb->work_lock ->&n->list_lock ->key#13 ->&pl->lock ->stock_lock ->&xa->xa_lock#3 ->&____s->seqcount#2 ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->key#27 FD: 2 BD: 124 ....: &info->lock ->key#9 FD: 36 BD: 21 -.-.: &vkms_out->lock ->&dev->event_lock FD: 35 BD: 22 -.-.: &dev->event_lock ->&dev->vbl_lock ->&____s->seqcount#6 ->&x->wait#14 ->&obj_hash[i].lock ->pool_lock#2 ->&dev->vblank_time_lock ->&vblank->queue ->&base->lock FD: 1 BD: 27 ----: &____s->seqcount#6 FD: 29 BD: 23 -...: &x->wait#14 ->&p->pi_lock FD: 19 BD: 25 -.-.: &dev->vblank_time_lock ->tk_core.seq.seqcount ->&(&vblank->seqlock)->lock ->&obj_hash[i].lock ->hrtimer_bases.lock FD: 2 BD: 26 -.-.: &(&vblank->seqlock)->lock ->&____s->seqcount#6 FD: 1 BD: 21 +.+.: (work_completion)(&vkms_state->composer_work) FD: 1 BD: 17 ....: &helper->damage_lock FD: 148 BD: 2 +.+.: (work_completion)(&helper->damage_work) ->&helper->damage_lock ->&helper->lock FD: 1 BD: 3985 +.+.: &lock->wait_lock FD: 1 BD: 23 -.-.: &vblank->queue FD: 1 BD: 21 +.+.: (work_completion)(&vkms_state->composer_work)#2 FD: 1 BD: 13 ....: vt_event_lock FD: 1 BD: 1 +.+.: kernel_fb_helper_lock FD: 1 BD: 1 +.+.: &dev->clientlist_mutex FD: 1 BD: 1 +...: &dev->queue_lock FD: 1 BD: 8 ....: blk_queue_ida.xa_lock FD: 2 BD: 6 +.+.: &xa->xa_lock#8 ->pool_lock#2 FD: 38 BD: 268 ....: &q->queue_lock ->&blkcg->lock ->pool_lock#2 ->pcpu_lock ->&obj_hash[i].lock ->percpu_counters_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&n->list_lock FD: 21 BD: 269 ....: &blkcg->lock ->pool_lock#2 ->percpu_ref_switch_lock ->(&sq->pending_timer) ->&obj_hash[i].lock ->&base->lock ->percpu_counters_lock ->pcpu_lock ->pool_lock ->&c->lock FD: 279 BD: 8 +.+.: &q->sysfs_lock ->&q->unused_hctx_lock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&obj_hash[i].lock ->cpu_hotplug_lock ->fs_reclaim ->&xa->xa_lock#9 ->&q->debugfs_mutex ->&c->lock ->&zone->lock ->&____s->seqcount ->pcpu_alloc_mutex ->&q->mq_freeze_lock ->percpu_ref_switch_lock ->&q->queue_lock ->&stats->lock ->lock ->&root->kernfs_rwsem ->set->srcu ->&n->list_lock ->&rq->__lock ->&cfs_rq->removed.lock FD: 1 BD: 9 +.+.: &q->unused_hctx_lock FD: 1 BD: 8 +.+.: &bdev->bd_size_lock FD: 2 BD: 11 +.+.: &xa->xa_lock#9 ->pool_lock#2 FD: 31 BD: 5 +.+.: &set->tag_list_lock ->&q->mq_freeze_lock ->percpu_ref_switch_lock FD: 30 BD: 10 +.+.: &q->mq_freeze_lock ->percpu_ref_switch_lock ->&q->mq_freeze_wq ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 5 BD: 274 ..-.: percpu_ref_switch_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 275 ....: &q->mq_freeze_wq FD: 3 BD: 6 +.+.: subsys mutex#36 ->&k->k_lock FD: 280 BD: 6 +.+.: &q->sysfs_dir_lock ->fs_reclaim ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->&c->lock ->&zone->lock ->&____s->seqcount ->&q->sysfs_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&obj_hash[i].lock FD: 103 BD: 9 +.+.: &q->debugfs_mutex ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 FD: 3 BD: 5 +.+.: subsys mutex#37 ->&k->k_lock FD: 1 BD: 5 ....: cgwb_lock FD: 1 BD: 5 +...: bdi_lock FD: 62 BD: 309 +.+.: inode_hash_lock ->&sb->s_type->i_lock_key#3 
->&sb->s_type->i_lock_key#22 ->&s->s_inode_list_lock ->&sb->s_type->i_lock_key#24 ->&sb->s_type->i_lock_key#30 ->&sb->s_type->i_lock_key#31 FD: 1 BD: 4 +.+.: bdev_lock FD: 301 BD: 3 +.+.: &disk->open_mutex ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->init_mm.page_table_lock ->&zone->lock ->&xa->xa_lock#7 ->lock#4 ->mmu_notifier_invalidate_range_start ->&c->lock ->&mapping->private_lock ->tk_core.seq.seqcount ->&ret->b_uptodate_lock ->&obj_hash[i].lock ->purge_vmap_area_lock ->&sb->s_type->i_lock_key#3 ->lock#5 ->&lruvec->lru_lock ->&rq->__lock ->&base->lock ->&hctx->lock ->&x->wait#16 ->(&timer.timer) ->&cfs_rq->removed.lock ->&q->sysfs_dir_lock ->&bdev->bd_size_lock ->&dd->lock ->&folio_wait_table[i] ->(console_sem).lock ->console_owner_lock ->console_owner ->&s->s_inode_list_lock ->pcpu_alloc_mutex ->&x->wait#9 ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->&k->k_lock ->subsys mutex#36 ->&xa->xa_lock#8 ->inode_hash_lock ->bdev_lock ->&lo->lo_mutex ->nbd_index_mutex ->&nbd->config_lock ->&new->lock ->&lock->wait_lock FD: 44 BD: 4475 +.+.: &mapping->private_lock ->&xa->xa_lock#7 FD: 30 BD: 6 ..-.: &ret->b_uptodate_lock ->bit_wait_table + i FD: 14 BD: 5 ....: floppy_lock ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 1 +.+.: vsock_register_mutex FD: 29 BD: 4 ....: command_done.lock ->&p->pi_lock FD: 17 BD: 2 +.+.: floppy_work ->dma_spin_lock ->floppy_lock ->&obj_hash[i].lock ->fdc_wait.lock FD: 1 BD: 3 ....: dma_spin_lock FD: 77 BD: 1 +.+.: loop_ctl_mutex ->fs_reclaim ->pool_lock#2 FD: 1 BD: 9 ....: &stats->lock FD: 31 BD: 1 -.-.: &vb->stop_update_lock FD: 267 BD: 1 +.+.: (wq_completion)events_freezable ->(work_completion)(&vb->update_balloon_stats_work) ->&rq->__lock FD: 266 BD: 2 +.+.: (work_completion)(&vb->update_balloon_stats_work) ->cpu_hotplug_lock ->&s->s_inode_list_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 31 BD: 1 ..-.: &(&ops->cursor_work)->timer FD: 33 BD: 2 +.+.: (work_completion)(&(&ops->cursor_work)->work) ->(console_sem).lock ->&obj_hash[i].lock ->&base->lock FD: 87 BD: 4 +.+.: nbd_index_mutex ->fs_reclaim ->pool_lock#2 ->&nbd->config_lock FD: 1 BD: 11 .+.+: set->srcu FD: 17 BD: 6 +.+.: (work_completion)(&(&q->requeue_work)->work) ->&q->requeue_lock ->&hctx->lock FD: 18 BD: 6 +.+.: (work_completion)(&(&hctx->run_work)->work) FD: 292 BD: 1 +.+.: zram_index_mutex ->fs_reclaim ->pool_lock#2 ->blk_queue_ida.xa_lock ->&obj_hash[i].lock ->pcpu_alloc_mutex ->bio_slab_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->percpu_counters_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#3 ->&s->s_inode_list_lock ->&xa->xa_lock#8 ->lock ->&q->queue_lock ->&x->wait#9 ->&bdev->bd_size_lock ->&k->list_lock ->gdp_mutex ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&rq->__lock ->&x->wait#11 ->subsys mutex#36 ->dev_hotplug_mutex ->&q->sysfs_dir_lock ->percpu_ref_switch_lock ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#37 ->cgwb_lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->bdi_lock ->inode_hash_lock ->(console_sem).lock FD: 3 BD: 1 +.+.: subsys mutex#38 ->&k->k_lock FD: 78 BD: 2 +.+.: &default_group_class[depth - 1]#2 ->fs_reclaim ->pool_lock#2 ->configfs_dirent_lock FD: 2 BD: 1 +.+.: &lock ->nullb_indexes.xa_lock FD: 1 BD: 2 ....: nullb_indexes.xa_lock FD: 1 BD: 1 +.+.: ctx_list.lock FD: 1 BD: 1 
....: nfc_index_ida.xa_lock FD: 151 BD: 3 +.+.: nfc_devlist_mutex ->fs_reclaim ->pool_lock#2 ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->&obj_hash[i].lock ->running_helpers_waitq.lock ->subsys mutex#39 ->&c->lock ->&____s->seqcount ->&k->k_lock ->&genl_data->genl_data_mutex ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 3 BD: 4 +.+.: subsys mutex#39 ->&k->k_lock FD: 1 BD: 71 ....: &rfkill->lock FD: 3 BD: 10 +.+.: subsys mutex#40 ->&k->k_lock FD: 156 BD: 2 +.+.: (work_completion)(&rfkill->sync_work) ->rfkill_global_mutex ->rfkill_global_mutex.wait_lock ->&p->pi_lock ->&rq->__lock FD: 1 BD: 10 +.+.: rfkill_global_mutex.wait_lock FD: 1 BD: 1 +.+.: dma_heap_minors.xa_lock FD: 3 BD: 1 +.+.: subsys mutex#41 ->&k->k_lock FD: 1 BD: 1 +.+.: heap_list_lock FD: 1 BD: 1 ....: host_index_ida.xa_lock FD: 125 BD: 1 +.+.: scsi_sense_cache_mutex ->slab_mutex FD: 28 BD: 4 +.+.: subsys mutex#42 ->&rq->__lock FD: 3 BD: 1 +.+.: subsys mutex#43 ->&k->k_lock FD: 1 BD: 193 ....: &dev->power.wait_queue FD: 1 BD: 124 -.-.: &virtscsi_vq->vq_lock FD: 312 BD: 3 +.+.: &shost->scan_mutex ->fs_reclaim ->pool_lock#2 ->shost->host_lock ->&dev->power.lock ->&x->wait#9 ->&obj_hash[i].lock ->attribute_container_mutex ->blk_queue_ida.xa_lock ->pcpu_alloc_mutex ->&q->sysfs_lock ->&set->tag_list_lock ->batched_entropy_u32.lock ->&c->lock ->&zone->lock ->&____s->seqcount ->tk_core.seq.seqcount ->mmu_notifier_invalidate_range_start ->&hctx->lock ->&base->lock ->&x->wait#16 ->&rq->__lock ->(&timer.timer) ->&sdev->state_mutex ->&q->mq_freeze_lock ->&q->mq_freeze_wq ->percpu_ref_switch_lock ->(&q->timeout) ->(work_completion)(&q->timeout_work) ->(work_completion)(&(&q->requeue_work)->work) ->(work_completion)(&(&hctx->run_work)->work) ->cpu_hotplug_lock ->&xa->xa_lock#9 ->&q->unused_hctx_lock ->(work_completion)(&sdev->requeue_work) ->(work_completion)(&sdev->event_work) ->pcpu_lock ->&sdev->inquiry_mutex ->(console_sem).lock ->&cfs_rq->removed.lock ->quarantine_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&x->wait#15 ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&k->k_lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#42 ->device_links_srcu ->async_lock ->gdp_mutex ->subsys mutex#44 ->bsg_minor_ida.xa_lock ->chrdevs_lock ->req_lock ->&p->pi_lock ->&x->wait#11 ->subsys mutex#57 FD: 1 BD: 4 ....: shost->host_lock FD: 2 BD: 3 +.+.: async_scan_lock ->&x->wait#15 FD: 1 BD: 5 ....: &x->wait#15 FD: 1 BD: 8 +.+.: &hctx->lock FD: 29 BD: 5 ..-.: &x->wait#16 ->&p->pi_lock FD: 154 BD: 4 +.+.: subsys mutex#44 ->&k->list_lock ->&k->k_lock ->fs_reclaim ->pool_lock#2 ->lock ->chrdevs_lock ->&x->wait#9 ->&obj_hash[i].lock ->gdp_mutex ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->&rq->__lock ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#56 ->(console_sem).lock FD: 1 BD: 4 +.+.: &sdev->state_mutex FD: 31 BD: 4 +.-.: (&q->timeout) FD: 15 BD: 5 +.+.: (work_completion)(&q->timeout_work) ->&tags->lock ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 4 +.+.: (work_completion)(&sdev->requeue_work) FD: 1 BD: 4 +.+.: (work_completion)(&sdev->event_work) FD: 28 BD: 4 +.+.: &sdev->inquiry_mutex ->&rq->__lock FD: 1 BD: 1 +.+.: nvmf_hosts_mutex FD: 3 BD: 1 +.+.: subsys mutex#45 ->&k->k_lock FD: 
1 BD: 1 +.+.: nvmf_transports_rwsem FD: 3 BD: 1 +.+.: subsys mutex#46 ->&k->k_lock FD: 96 BD: 3 +.+.: &default_group_class[depth - 1]#3/2 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->configfs_dirent_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#19 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&default_group_class[depth - 1]#4/2 FD: 1 BD: 1 +.+.: nvmet_config_sem FD: 3 BD: 1 +.+.: subsys mutex#47 ->&k->k_lock FD: 95 BD: 4 +.+.: &default_group_class[depth - 1]#4/2 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->configfs_dirent_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#19 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&default_group_class[depth - 1]#5/2 FD: 94 BD: 5 +.+.: &default_group_class[depth - 1]#5/2 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->configfs_dirent_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#19 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&default_group_class[depth - 1]#6 ->&default_group_class[depth - 1]#6/2 FD: 78 BD: 6 +.+.: &default_group_class[depth - 1]#6 ->fs_reclaim ->pool_lock#2 ->configfs_dirent_lock FD: 1 BD: 6 +.+.: &default_group_class[depth - 1]#6/2 FD: 1 BD: 1 +.+.: backend_mutex FD: 1 BD: 1 +.+.: scsi_mib_index_lock FD: 1 BD: 1 +.+.: hba_lock FD: 77 BD: 1 +.+.: device_mutex ->fs_reclaim ->pool_lock#2 FD: 1 BD: 1 +.+.: &hba->device_lock FD: 306 BD: 1 +.+.: mtd_table_mutex ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->&rq->__lock ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#48 ->devtree_lock ->nvmem_ida.xa_lock ->nvmem_cell_mutex ->&k->k_lock ->subsys mutex#49 ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->&zone->lock ->(console_sem).lock ->pcpu_alloc_mutex ->cpu_hotplug_lock ->&n->list_lock ->batched_entropy_u32.lock ->mmu_notifier_invalidate_range_start ->blk_queue_ida.xa_lock ->&q->sysfs_lock ->&set->tag_list_lock ->bio_slab_lock ->percpu_counters_lock ->&sb->s_type->i_lock_key#3 ->&s->s_inode_list_lock ->&xa->xa_lock#8 ->&q->mq_freeze_lock ->set->srcu ->percpu_ref_switch_lock ->&q->queue_lock ->&bdev->bd_size_lock ->elv_list_lock ->(work_completion)(&(&q->requeue_work)->work) ->(work_completion)(&(&hctx->run_work)->work) ->&q->debugfs_mutex ->&cfs_rq->removed.lock ->subsys mutex#36 ->dev_hotplug_mutex ->&q->sysfs_dir_lock ->subsys mutex#37 ->cgwb_lock ->bdi_lock ->inode_hash_lock FD: 1 BD: 1 +.+.: part_parser_lock FD: 1 BD: 82 ....: (kmod_concurrent_max).lock FD: 29 BD: 84 ....: &x->wait#17 ->&p->pi_lock FD: 1 BD: 123 ....: &prev->lock FD: 3 BD: 2 +.+.: subsys mutex#48 ->&k->k_lock FD: 1 BD: 2 ....: nvmem_ida.xa_lock FD: 1 BD: 2 +.+.: nvmem_cell_mutex FD: 1 BD: 2 +.+.: subsys mutex#49 FD: 772 BD: 1 +.+.: (wq_completion)gid-cache-wq ->(work_completion)(&ndev_work->work) ->(work_completion)(&work->work) ->&rq->__lock FD: 770 BD: 2 +.+.: (work_completion)(&ndev_work->work) ->devices_rwsem ->&obj_hash[i].lock ->pool_lock#2 ->&rq->__lock ->&meta->lock ->kfence_freelist_lock ->quarantine_lock ->&cfs_rq->removed.lock FD: 1 BD: 64 +.+.: &bond->stats_lock FD: 17 BD: 77 ....: lweventlist_lock ->pool_lock#2 ->&dir->lock#2 ->&c->lock ->&n->list_lock ->&____s->seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock ->&obj_hash[i].lock ->&base->lock ->&____s->seqcount#2 FD: 745 BD: 2 +.+.: (linkwatch_work).work ->rtnl_mutex ->rtnl_mutex.wait_lock 
->&p->pi_lock ->&rq->__lock ->&cfs_rq->removed.lock ->pool_lock#2 ->&obj_hash[i].lock ->stock_lock FD: 1 BD: 3934 +.+.: rtnl_mutex.wait_lock FD: 3 BD: 3925 ..-.: once_lock ->crngs.lock FD: 266 BD: 2 +.+.: (work_completion)(&w->work) ->cpu_hotplug_lock ->&obj_hash[i].lock FD: 28 BD: 64 ++++: (inet6addr_validator_chain).rwsem ->&rq->__lock FD: 28 BD: 64 ++++: (inetaddr_validator_chain).rwsem ->&rq->__lock FD: 3 BD: 1 +.+.: subsys mutex#50 ->&k->k_lock FD: 1 BD: 1 +.+.: mdio_board_lock FD: 1 BD: 1 +.+.: mode_list_lock FD: 1 BD: 64 +.+.: napi_hash_lock FD: 82 BD: 108 +.+.: xps_map_mutex ->fs_reclaim ->pool_lock#2 ->jump_label_mutex ->&obj_hash[i].lock ->krc.lock ->&rq->__lock FD: 1 BD: 2 +.+.: (work_completion)(&vi->config_work) FD: 1 BD: 1 +.+.: l3mdev_lock FD: 3 BD: 1 +.+.: subsys mutex#51 ->&k->k_lock FD: 2 BD: 1 +.+.: compressor_list_lock ->pool_lock#2 FD: 1 BD: 5 ....: hwsim_netgroup_ida.xa_lock FD: 34 BD: 3921 +.-.: hwsim_radio_lock ->pool_lock#2 ->&list->lock#19 ->&c->lock ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount ->init_task.mems_allowed_seq.seqcount ->&zone->lock ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 3 BD: 4 +.+.: subsys mutex#52 ->&k->k_lock FD: 291 BD: 66 +.+.: &rdev->wiphy.mtx ->fs_reclaim ->pool_lock#2 ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->&obj_hash[i].lock ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#53 ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->nl_table_lock ->nl_table_wait.lock ->reg_requests_lock ->stack_depot_init_mutex ->pcpu_alloc_mutex ->&local->iflist_mtx ->net_rwsem ->&x->wait#9 ->subsys mutex#17 ->&dir->lock#2 ->dev_hotplug_mutex ->dev_base_lock ->input_pool.lock ->batched_entropy_u32.lock ->&tbl->lock ->sysctl_lock ->&wdev->mtx ->&fq->lock ->quarantine_lock ->&rq->__lock ->&cfs_rq->removed.lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&n->list_lock ->rlock-AF_NETLINK ->lweventlist_lock ->&pool->lock ->&data->mutex ->&base->lock ->&tn->lock ->failover_lock ->proc_subdir_lock ->proc_inum_ida.xa_lock ->&idev->mc_lock ->&pnettable->lock ->smc_ib_devices.mutex ->&wdev->event_lock ->&rdev->mgmt_registrations_lock ->(work_completion)(&(&sdata->dec_tailroom_needed_wk)->work) ->&local->key_mtx ->&dentry->d_lock ->&fsnotify_mark_srcu ->&sb->s_type->i_lock_key#7 ->&s->s_inode_list_lock ->&xa->xa_lock#7 ->mount_lock ->&rdev->wiphy_work_lock ->(&dwork->timer) ->(work_completion)(&(&link->color_collision_detect_work)->work) ->&local->chanctx_mtx ->rtnl_mutex.wait_lock ->&p->pi_lock ->&list->lock#18 ->&lock->wait_lock ->remove_cache_srcu ->lock#6 ->&____s->seqcount#2 ->&sem->wait_lock ->&local->queue_stop_reason_lock ->&local->mtx ->&local->sta_mtx ->_xmit_ETHER ->(&local->dynamic_ps_timer) ->(work_completion)(&local->dynamic_ps_enable_work) ->(work_completion)(&sdata->recalc_smps) ->(work_completion)(&link->csa_finalize_work) ->(work_completion)(&link->color_change_finalize_work) ->(work_completion)(&(&link->dfs_cac_timer_work)->work) ->&local->filter_lock ->&meta->lock FD: 3 BD: 67 +.+.: subsys mutex#53 ->&k->k_lock FD: 1 BD: 67 +.+.: reg_requests_lock FD: 1 BD: 67 +.+.: &local->iflist_mtx FD: 145 BD: 67 +.+.: &wdev->mtx ->&rdev->bss_lock ->&local->chanctx_mtx ->&rdev->wiphy_work_lock ->&ifibss->incomplete_lock ->(console_sem).lock ->console_owner_lock ->console_owner ->&rq->__lock ->&local->mtx ->fs_reclaim ->pool_lock#2 
->tk_core.seq.seqcount ->hrtimer_bases.lock ->&obj_hash[i].lock ->&base->lock ->&wdev->event_lock ->nl_table_lock ->nl_table_wait.lock ->&c->lock ->&____s->seqcount ->&list->lock#2 ->&n->list_lock ->&sta->lock ->&local->sta_mtx ->remove_cache_srcu ->&____s->seqcount#2 ->rcu_node_0 ->&rcu_state.expedited_wq ->quarantine_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->lweventlist_lock ->krc.lock ->&list->lock#18 ->(&ifibss->timer) ->&cfs_rq->removed.lock FD: 5 BD: 3926 +.-.: &fq->lock ->tk_core.seq.seqcount FD: 1 BD: 3 ....: sd_index_ida.xa_lock FD: 3 BD: 3 +.+.: subsys mutex#54 ->&k->k_lock FD: 2 BD: 221 ....: sg_index_lock ->pool_lock#2 FD: 3 BD: 64 +.+.: subsys mutex#55 ->&k->k_lock FD: 3 BD: 5 +.+.: subsys mutex#56 ->&k->k_lock FD: 2 BD: 65 +.+.: &sdata->sec_mtx ->&sec->lock FD: 1 BD: 66 +...: &sec->lock FD: 1 BD: 4 ....: bsg_minor_ida.xa_lock FD: 395 BD: 64 +.+.: &local->iflist_mtx#2 ->netpoll_srcu ->net_rwsem ->&pn->hash_lock ->&tn->lock ->&dev->tx_global_lock ->&obj_hash[i].lock ->&rq->__lock ->dev->qdisc_tx_busylock ?: &qdisc_tx_busylock ->&sch->q.lock ->&wq->mutex ->fs_reclaim ->pool_lock#2 ->&c->lock ->nl_table_lock ->rlock-AF_NETLINK ->nl_table_wait.lock ->__ip_vs_mutex ->netlbl_unlhsh_lock ->&rdev->dev_wait ->&im->lock ->krc.lock ->&tbl->lock ->class ->(&tbl->proxy_timer) ->&base->lock ->flowtable_lock ->&dir->lock ->nr_list_lock ->nr_neigh_list_lock ->dev_base_lock ->cpu_hotplug_lock ->&dir->lock#2 ->bpf_devs_lock ->&in_dev->mc_tomb_lock ->sysctl_lock ->&ul->lock ->&net->xdp.lock ->mirred_list_lock ->&nft_net->commit_mutex ->&pnn->pndevs.lock ->&pnn->routes.lock ->&pnettable->lock ->smc_ib_devices.mutex ->proc_subdir_lock ->&ent->pde_unload_lock ->proc_inum_ida.xa_lock ->&net->ipv6.addrconf_hash_lock ->&ndev->lock ->&idev->mc_lock ->&idev->mc_query_lock ->(work_completion)(&(&idev->mc_report_work)->work) ->&idev->mc_report_lock ->devices_lock ->&____s->seqcount ->target_list_lock ->&dev_addr_list_lock_key#5 ->&root->kernfs_rwsem ->uevent_sock_mutex ->sysfs_symlink_target_lock ->&k->list_lock ->kernfs_idr_lock ->dev_hotplug_mutex ->dev_pm_qos_sysfs_mtx ->subsys mutex#17 ->&x->wait#9 ->dpm_list_mtx ->&dev->power.lock ->deferred_probe_mutex ->device_links_lock ->mmu_notifier_invalidate_range_start ->gdp_mutex ->&n->list_lock ->_xmit_IEEE802154 ->&sem->wait_lock ->&p->pi_lock ->&k->k_lock FD: 746 BD: 4 +.+.: hwsim_phys_lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock ->&rnp->exp_lock ->rcu_state.exp_mutex ->&wq->mutex ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->rcu_state.barrier_mutex ->dev_base_lock ->lweventlist_lock ->&dir->lock#2 ->netdev_unregistering_wq.lock ->wq_mayday_lock ->&x->wait ->wq_pool_mutex ->&pool->lock/1 FD: 3 BD: 4 +.+.: subsys mutex#57 ->&k->k_lock FD: 1 BD: 130 +.+.: &dd->lock FD: 29 BD: 4468 ..-.: &folio_wait_table[i] ->&p->pi_lock FD: 24 BD: 1 +.+.: (wq_completion)kblockd ->(work_completion)(&(&hctx->run_work)->work) ->(work_completion)(&q->timeout_work) ->(work_completion)(&(&q->requeue_work)->work) FD: 77 BD: 1 +.+.: xdomain_lock ->fs_reclaim ->pool_lock#2 FD: 1 BD: 1 +.+.: ioctl_mutex FD: 1 BD: 1 +.+.: address_handler_list_lock FD: 1 BD: 1 +.+.: card_mutex FD: 3 BD: 1 +.+.: subsys mutex#58 ->&k->k_lock FD: 29 BD: 1 ....: &x->wait#18 ->&p->pi_lock FD: 31 BD: 2 ..-.: &txlock ->&list->lock#3 ->&txwq FD: 1 BD: 3 ..-.: &list->lock#3 FD: 29 BD: 3 ..-.: &txwq ->&p->pi_lock FD: 2 BD: 1 ....: &iocq[i].lock ->&ktiowq[i] FD: 1 BD: 2 ....: &ktiowq[i] FD: 1 BD: 1 ....: rcu_read_lock_bh FD: 1 BD: 3893 +.-.: 
noop_qdisc.q.lock FD: 3 BD: 3 +.+.: subsys mutex#59 ->&k->k_lock FD: 197 BD: 1 +.+.: usb_bus_idr_lock ->(usb_notifier_list).rwsem ->fs_reclaim ->&c->lock ->&____s->seqcount ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->hcd_root_hub_lock ->&obj_hash[i].lock ->&rq->__lock ->&x->wait#19 ->&dev->power.lock ->device_links_srcu ->&zone->lock ->(console_sem).lock ->input_pool.lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&k->k_lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->&(&priv->bus_notifier)->rwsem ->uevent_sock_mutex ->running_helpers_waitq.lock ->device_state_lock ->&dum_hcd->dum->lock ->subsys mutex#60 ->&x->wait#9 ->&lock->wait_lock ->&hub->irq_urb_lock ->(&hub->irq_urb_retry) ->&base->lock ->hcd_urb_unlink_lock ->(work_completion)(&hub->tt.clear_work) ->hcd_urb_list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&cfs_rq->removed.lock ->&vhci_hcd->vhci->lock ->quarantine_lock FD: 140 BD: 1 +.+.: table_lock ->&k->list_lock ->fs_reclaim ->&c->lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->&k->k_lock ->uevent_sock_mutex ->&obj_hash[i].lock ->running_helpers_waitq.lock ->(console_sem).lock ->&rq->__lock ->&cfs_rq->removed.lock ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 1 BD: 3 +.+.: mon_lock FD: 141 BD: 2 +.+.: usb_port_peer_mutex ->fs_reclaim ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->&dev->power.lock ->dpm_list_mtx ->&k->k_lock ->dev_pm_qos_mtx ->component_mutex ->device_links_srcu ->dev_pm_qos_sysfs_mtx ->&rq->__lock ->&zone->lock ->sysfs_symlink_target_lock ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 1 BD: 2 ....: device_state_lock FD: 31 BD: 6 ....: hcd_root_hub_lock ->hcd_urb_list_lock ->&bh->lock ->&p->pi_lock FD: 1 BD: 7 ....: hcd_urb_list_lock FD: 1 BD: 7 ..-.: &bh->lock FD: 4 BD: 86 ..-.: lock#6 ->kcov_remote_lock ->&kcov->lock FD: 2 BD: 124 ..-.: kcov_remote_lock ->pool_lock#2 FD: 29 BD: 6 ..-.: &x->wait#19 ->&p->pi_lock FD: 1 BD: 2 +.+.: set_config_lock FD: 86 BD: 2 +.+.: hcd->bandwidth_mutex ->devtree_lock ->&obj_hash[i].lock ->&x->wait#9 ->&dev->power.lock ->fs_reclaim ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->hcd_root_hub_lock ->&rq->__lock ->&x->wait#19 ->&c->lock ->&zone->lock ->&____s->seqcount ->&base->lock FD: 1 BD: 2 +.+.: &new_driver->dynids.lock FD: 1 BD: 5 ....: &dum_hcd->dum->lock FD: 84 BD: 4 +.+.: &hub->status_mutex ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&rq->__lock ->hcd_root_hub_lock ->fs_reclaim ->&dum_hcd->dum->lock ->&obj_hash[i].lock ->&x->wait#19 ->&base->lock ->&pool->lock ->(&timer.timer) ->&vhci_hcd->vhci->lock ->&c->lock ->&zone->lock ->&____s->seqcount FD: 1 BD: 3 +.+.: component_mutex FD: 35 BD: 1 +.+.: (wq_completion)pm ->(work_completion)(&dev->power.work) FD: 34 BD: 2 +.+.: (work_completion)(&dev->power.work) ->&dev->power.lock FD: 86 BD: 2 +.+.: (work_completion)(&(&hub->init_work)->work) FD: 1 BD: 2 +.+.: subsys mutex#60 FD: 38 BD: 1 +.+.: (wq_completion)usb_hub_wq ->(work_completion)(&hub->events) FD: 37 BD: 2 +.+.: (work_completion)(&hub->events) ->lock#6 ->&dev->power.lock FD: 1 BD: 2 ....: &hub->irq_urb_lock FD: 1 BD: 2 ....: (&hub->irq_urb_retry) FD: 1 BD: 2 ....: hcd_urb_unlink_lock FD: 1 BD: 1 ..-.: usb_kill_urb_queue.lock FD: 1 BD: 2 +.+.: (work_completion)(&hub->tt.clear_work) FD: 31 BD: 1 ..-.: lib/debugobjects.c:101 FD: 28 BD: 2 +.+.: (debug_obj_work).work ->pool_lock#2 ->&rq->__lock 
FD: 1 BD: 8 +.+.: udc_lock FD: 3 BD: 1 +.+.: subsys mutex#61 ->&k->k_lock FD: 1 BD: 1 ....: gadget_id_numbers.xa_lock FD: 104 BD: 2 +.+.: (work_completion)(&gadget->work) ->&root->kernfs_rwsem ->kernfs_notify_lock FD: 31 BD: 124 ....: kernfs_notify_lock FD: 65 BD: 2 +.+.: kernfs_notify_work ->kernfs_notify_lock ->&root->kernfs_supers_rwsem FD: 63 BD: 7 ++++: &root->kernfs_supers_rwsem ->inode_hash_lock FD: 1 BD: 1 +.+.: subsys mutex#62 FD: 1 BD: 1 +.+.: func_lock FD: 1 BD: 1 +.+.: g_tf_lock FD: 1 BD: 5 ....: &vhci_hcd->vhci->lock FD: 31 BD: 1 ..-.: net/core/link_watch.c:31 FD: 36 BD: 7 -.-.: i8042_lock ->(console_sem).lock ->&x->wait#20 FD: 29 BD: 8 -...: &x->wait#20 ->&p->pi_lock FD: 1 BD: 79 +.+.: &ent->pde_unload_lock FD: 31 BD: 4 ....: serio_event_lock ->pool_lock#2 FD: 217 BD: 1 +.+.: (wq_completion)events_long ->serio_event_work ->(work_completion)(&(&ipvs->defense_work)->work) ->(work_completion)(&(&br->gc_work)->work) ->&rq->__lock ->(work_completion)(&br->mcast_gc_work) ->(work_completion)(&(&ipvs->est_reload_work)->work) FD: 192 BD: 2 +.+.: serio_event_work ->serio_mutex FD: 191 BD: 3 +.+.: serio_mutex ->serio_event_lock ->i8042_lock ->fs_reclaim ->pool_lock#2 ->&k->list_lock ->lock ->&root->kernfs_rwsem ->&device->physical_node_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->semaphore->lock ->&obj_hash[i].lock ->sysfs_symlink_target_lock ->&k->k_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#63 ->bus_type_sem FD: 1 BD: 4 +.+.: subsys mutex#63 FD: 2 BD: 7 ....: input_ida.xa_lock ->pool_lock#2 FD: 38 BD: 7 +.+.: &mousedev->mutex/1 ->&mousedev->mutex#2 FD: 178 BD: 4 +.+.: &serio->drv_mutex ->fs_reclaim ->pool_lock#2 ->&c->lock ->&zone->lock ->&____s->seqcount ->&x->wait#9 ->&obj_hash[i].lock ->&serio->lock ->i8042_mutex ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#30 ->(console_sem).lock ->input_mutex ->i8042_lock ->psmouse_mutex FD: 36 BD: 7 -.-.: &serio->lock ->&ps2dev->wait ->&dev->power.lock ->&dev->event_lock#2 FD: 46 BD: 6 +.+.: i8042_mutex ->&serio->lock ->i8042_lock ->&ps2dev->wait ->&obj_hash[i].lock ->&base->lock ->&pool->lock ->&rq->__lock ->(&timer.timer) FD: 29 BD: 8 -.-.: &ps2dev->wait ->&p->pi_lock FD: 1 BD: 1 ....: rtc_ida.xa_lock FD: 2 BD: 1 +.+.: &rtc->ops_lock ->rtc_lock FD: 1 BD: 2 ....: platform_devid_ida.xa_lock FD: 1 BD: 2 ....: rtcdev_lock FD: 77 BD: 1 +.+.: g_smscore_deviceslock ->fs_reclaim ->pool_lock#2 FD: 1 BD: 1 +.+.: cx231xx_devlist_mutex FD: 153 BD: 7 +.+.: &led_cdev->led_access ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&zone->lock ->&____s->seqcount ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#64 ->leds_list_lock ->triggers_list_lock FD: 3 BD: 8 +.+.: subsys mutex#64 ->&k->k_lock FD: 91 BD: 19 +.+.: &led_cdev->trigger_lock ->fs_reclaim ->pool_lock#2 ->&trig->leddev_list_lock ->uevent_sock_mutex ->&obj_hash[i].lock ->running_helpers_waitq.lock ->&c->lock ->&zone->lock ->&____s->seqcount FD: 1 BD: 20 +.+.: &trig->leddev_list_lock FD: 1 BD: 22 -...: &dev->event_lock#2 FD: 1 BD: 1 +.+.: em28xx_devlist_mutex FD: 177 BD: 5 +.+.: psmouse_mutex ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&zone->lock ->&____s->seqcount ->&serio->lock 
->i8042_mutex ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->&rq->__lock ->&k->k_lock ->subsys mutex#30 ->(console_sem).lock ->console_owner_lock ->console_owner ->input_mutex FD: 1 BD: 1 ....: pvr2_context_sync_data.lock FD: 1 BD: 15 +.+.: i2c_dev_list_lock FD: 3 BD: 8 +.+.: subsys mutex#65 ->&k->k_lock FD: 1 BD: 1 +.+.: subsys mutex#66 FD: 156 BD: 2 +.+.: dvbdev_register_lock ->(console_sem).lock ->fs_reclaim ->pool_lock#2 ->minor_rwsem ->&xa->xa_lock#10 ->&mdev->graph_mutex ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&rq->__lock ->&x->wait#11 ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#67 ->&zone->lock FD: 157 BD: 1 +.+.: frontend_mutex ->fs_reclaim ->pool_lock#2 ->(console_sem).lock ->dvbdev_register_lock FD: 1 BD: 3 +.+.: minor_rwsem FD: 2 BD: 3 ....: &xa->xa_lock#10 ->pool_lock#2 FD: 77 BD: 4 +.+.: &mdev->graph_mutex ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock FD: 3 BD: 3 +.+.: subsys mutex#67 ->&k->k_lock FD: 1 BD: 1 ....: &dmxdev->lock FD: 1 BD: 1 +.+.: &dvbdemux->mutex FD: 1 BD: 1 +.+.: media_devnode_lock FD: 1 BD: 1 +.+.: subsys mutex#68 FD: 1 BD: 1 +.+.: videodev_lock FD: 3 BD: 1 +.+.: subsys mutex#69 ->&k->k_lock FD: 1 BD: 1 +.+.: vimc_sensor:393:(&vsensor->hdl)->_lock FD: 1 BD: 1 +.+.: &v4l2_dev->lock FD: 1 BD: 1 +.+.: vimc_debayer:578:(&vdebayer->hdl)->_lock FD: 1 BD: 1 +.+.: vimc_lens:61:(&vlens->hdl)->_lock FD: 87 BD: 1 +.+.: vivid_ctrls:1606:(hdl_user_gen)->_lock ->vivid_ctrls:1620:(hdl_vid_cap)->_lock ->fs_reclaim ->pool_lock#2 ->&c->lock ->&____s->seqcount ->vivid_ctrls:1622:(hdl_vid_out)->_lock ->vivid_ctrls:1625:(hdl_vbi_cap)->_lock ->vivid_ctrls:1627:(hdl_vbi_out)->_lock ->vivid_ctrls:1630:(hdl_radio_rx)->_lock ->vivid_ctrls:1632:(hdl_radio_tx)->_lock ->vivid_ctrls:1634:(hdl_sdr_cap)->_lock ->vivid_ctrls:1636:(hdl_meta_cap)->_lock ->&zone->lock ->vivid_ctrls:1638:(hdl_meta_out)->_lock ->vivid_ctrls:1640:(hdl_tch_cap)->_lock ->&obj_hash[i].lock FD: 78 BD: 1 +.+.: vivid_ctrls:1608:(hdl_user_vid)->_lock ->vivid_ctrls:1620:(hdl_vid_cap)->_lock ->fs_reclaim ->pool_lock#2 ->&c->lock ->&zone->lock ->&____s->seqcount FD: 81 BD: 1 +.+.: vivid_ctrls:1610:(hdl_user_aud)->_lock ->vivid_ctrls:1620:(hdl_vid_cap)->_lock ->fs_reclaim ->pool_lock#2 ->vivid_ctrls:1622:(hdl_vid_out)->_lock ->vivid_ctrls:1630:(hdl_radio_rx)->_lock ->vivid_ctrls:1632:(hdl_radio_tx)->_lock ->&c->lock ->&zone->lock ->&____s->seqcount FD: 85 BD: 1 +.+.: vivid_ctrls:1612:(hdl_streaming)->_lock ->vivid_ctrls:1620:(hdl_vid_cap)->_lock ->fs_reclaim ->pool_lock#2 ->vivid_ctrls:1622:(hdl_vid_out)->_lock ->vivid_ctrls:1625:(hdl_vbi_cap)->_lock ->vivid_ctrls:1627:(hdl_vbi_out)->_lock ->vivid_ctrls:1634:(hdl_sdr_cap)->_lock ->vivid_ctrls:1636:(hdl_meta_cap)->_lock ->vivid_ctrls:1638:(hdl_meta_out)->_lock ->vivid_ctrls:1640:(hdl_tch_cap)->_lock ->&c->lock ->&____s->seqcount ->&zone->lock FD: 79 BD: 1 +.+.: vivid_ctrls:1614:(hdl_sdtv_cap)->_lock ->vivid_ctrls:1620:(hdl_vid_cap)->_lock ->fs_reclaim ->pool_lock#2 ->vivid_ctrls:1625:(hdl_vbi_cap)->_lock ->&rq->__lock ->&c->lock ->&____s->seqcount ->&zone->lock FD: 79 BD: 1 +.+.: vivid_ctrls:1616:(hdl_loop_cap)->_lock ->vivid_ctrls:1620:(hdl_vid_cap)->_lock ->fs_reclaim ->pool_lock#2 
->vivid_ctrls:1625:(hdl_vbi_cap)->_lock FD: 1 BD: 1 +.+.: vivid_ctrls:1618:(hdl_fb)->_lock FD: 1 BD: 7 +.+.: vivid_ctrls:1620:(hdl_vid_cap)->_lock FD: 1 BD: 4 +.+.: vivid_ctrls:1622:(hdl_vid_out)->_lock FD: 1 BD: 5 +.+.: vivid_ctrls:1625:(hdl_vbi_cap)->_lock FD: 1 BD: 3 +.+.: vivid_ctrls:1627:(hdl_vbi_out)->_lock FD: 1 BD: 3 +.+.: vivid_ctrls:1630:(hdl_radio_rx)->_lock FD: 1 BD: 3 +.+.: vivid_ctrls:1632:(hdl_radio_tx)->_lock FD: 1 BD: 3 +.+.: vivid_ctrls:1634:(hdl_sdr_cap)->_lock FD: 1 BD: 3 +.+.: vivid_ctrls:1636:(hdl_meta_cap)->_lock FD: 1 BD: 3 +.+.: vivid_ctrls:1638:(hdl_meta_out)->_lock FD: 1 BD: 3 +.+.: vivid_ctrls:1640:(hdl_tch_cap)->_lock FD: 1 BD: 1 ....: &adap->kthread_waitq FD: 1 BD: 1 +.+.: cec_devnode_lock FD: 1 BD: 1 +.+.: subsys mutex#70 FD: 6 BD: 1 +.+.: &adap->lock ->tk_core.seq.seqcount ->&adap->devnode.lock_fhs FD: 1 BD: 2 +.+.: &adap->devnode.lock_fhs FD: 1 BD: 1 +.+.: &dev->cec_xfers_slock FD: 1 BD: 1 ....: &dev->kthread_waitq_cec FD: 1 BD: 1 ....: ptp_clocks_map.xa_lock FD: 3 BD: 1 +.+.: subsys mutex#71 ->&k->k_lock FD: 1 BD: 1 +.+.: pers_lock FD: 1 BD: 1 +.+.: _lock FD: 1 BD: 3 +.+.: dm_bufio_clients_lock FD: 1 BD: 1 +.+.: _ps_lock FD: 1 BD: 1 +.+.: _lock#2 FD: 1 BD: 1 +.+.: _lock#3 FD: 1 BD: 1 +.+.: register_lock#2 FD: 3 BD: 1 +.+.: subsys mutex#72 ->&k->k_lock FD: 1 BD: 1 .+.+: bp_lock FD: 3 BD: 1 +.+.: subsys mutex#73 ->&k->k_lock FD: 17 BD: 1 +.-.: (&dsp_spl_tl) ->dsp_lock FD: 16 BD: 2 ..-.: dsp_lock ->iclock_lock ->&obj_hash[i].lock ->&base->lock FD: 5 BD: 3 ...-: iclock_lock ->tk_core.seq.seqcount FD: 84 BD: 66 +.+.: lock#7 ->fs_reclaim ->pool_lock#2 ->&xa->xa_lock#12 ->&rq->__lock ->crngs.lock ->&xa->xa_lock#17 ->&id_priv->qp_mutex ->&id_priv->lock ->&xa->xa_lock#18 ->&cm_id_priv->lock ->&c->lock ->&n->list_lock FD: 1 BD: 1 +.+.: intf_mutex FD: 1 BD: 1 ....: iscsi_transport_lock FD: 3 BD: 1 +.+.: subsys mutex#74 ->&k->k_lock FD: 1 BD: 1 ....: &tx_task->waiting FD: 773 BD: 2 ++++: link_ops_rwsem ->fs_reclaim ->&c->lock ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->(console_sem).lock ->&pdata->netdev_lock ->ndev_hash_lock ->crypto_alg_sem ->devices_rwsem ->&rxe->usdev_lock ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->&device->cache_lock ->rdmacg_mutex ->&____s->seqcount ->&k->list_lock ->gdp_mutex ->gdp_mutex.wait_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&____s->seqcount#2 ->&dev->power.lock ->dpm_list_mtx ->subsys mutex#83 ->&zone->lock ->&n->list_lock ->&sem->wait_lock ->rcu_node_0 ->remove_cache_srcu ->&rcu_state.expedited_wq ->uevent_sock_mutex FD: 157 BD: 1 +.+.: disable_lock ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->&c->lock ->&____s->seqcount ->sysfs_symlink_target_lock ->&k->k_lock ->&dev->power.lock ->dpm_list_mtx ->&(&priv->bus_notifier)->rwsem ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#3 FD: 3 BD: 1 +.+.: subsys mutex#75 ->&k->k_lock FD: 1 BD: 1 +.+.: service_lock FD: 33 BD: 1 ..-.: drivers/block/floppy.c:640 FD: 38 BD: 1 +.+.: (fd_timeout).work ->&obj_hash[i].lock ->floppy_work ->dma_spin_lock ->floppy_lock ->command_done.lock FD: 1 BD: 3 ....: fdc_wait.lock FD: 1 BD: 3 ....: (&motor_off_timer[drive]) FD: 1 BD: 270 ....: (&sq->pending_timer) FD: 1 BD: 3 +.+.: (work_completion)(&td->dispatch_work) FD: 39 BD: 5 +.+.: &q->blkcg_mutex ->&q->queue_lock ->&obj_hash[i].lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 FD: 1 BD: 1 ..-.: percpu_ref_switch_waitq.lock FD: 42 BD: 2 +.+.: 
(work_completion)(&blkg->free_work) ->&q->blkcg_mutex ->&obj_hash[i].lock ->pool_lock#2 ->&xa->xa_lock#9 ->pcpu_lock ->blk_queue_ida.xa_lock ->percpu_ref_switch_lock FD: 1 BD: 1 +.+.: comedi_drivers_list_lock FD: 3 BD: 6 +.+.: subsys mutex#76 ->&k->k_lock FD: 137 BD: 2 ++++: snd_ctl_layer_rwsem ->snd_ctl_led_mutex ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->&c->lock ->&____s->seqcount ->lock ->&root->kernfs_rwsem ->bus_type_sem ->&dev->power.lock ->dpm_list_mtx ->&k->k_lock ->sysfs_symlink_target_lock ->&zone->lock FD: 1 BD: 3 +.+.: snd_card_mutex FD: 1 BD: 1 +.+.: snd_ioctl_rwsem FD: 77 BD: 2 +.+.: strings ->fs_reclaim ->pool_lock#2 ->&c->lock ->&zone->lock ->&____s->seqcount FD: 1 BD: 2 +.+.: register_mutex FD: 152 BD: 3 +.+.: sound_mutex ->fs_reclaim ->pool_lock#2 ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&rq->__lock ->&x->wait#11 ->&obj_hash[i].lock ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#76 ->&c->lock ->&____s->seqcount ->&cfs_rq->removed.lock ->&zone->lock ->&k->k_lock FD: 162 BD: 1 +.+.: register_mutex#2 ->fs_reclaim ->pool_lock#2 ->sound_mutex ->&obj_hash[i].lock ->register_mutex ->&c->lock ->&____s->seqcount ->sound_oss_mutex ->strings ->&zone->lock ->&entry->access ->info_mutex FD: 154 BD: 1 +.+.: register_mutex#3 ->fs_reclaim ->pool_lock#2 ->sound_mutex ->clients_lock ->&c->lock ->&____s->seqcount FD: 1 BD: 5 ....: clients_lock FD: 2 BD: 1 +.+.: &client->ports_mutex ->&client->ports_lock FD: 1 BD: 5 .+.+: &client->ports_lock FD: 155 BD: 1 +.+.: register_mutex#4 ->fs_reclaim ->pool_lock#2 ->sound_oss_mutex ->&c->lock ->&____s->seqcount FD: 154 BD: 3 +.+.: sound_oss_mutex ->fs_reclaim ->pool_lock#2 ->sound_loader_lock ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&rq->__lock ->&x->wait#11 ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#76 ->&zone->lock ->&k->k_lock ->&cfs_rq->removed.lock FD: 1 BD: 4 +.+.: sound_loader_lock FD: 80 BD: 1 .+.+: &grp->list_mutex/1 ->clients_lock ->&client->ports_lock ->register_lock#3 ->fs_reclaim ->pool_lock#2 ->&c->lock ->&zone->lock ->&____s->seqcount FD: 2 BD: 1 +.+.: &grp->list_mutex#2 ->&grp->list_lock FD: 1 BD: 2 ....: &grp->list_lock FD: 87 BD: 2 +.+.: async_lookup_work ->fs_reclaim ->pool_lock#2 ->clients_lock ->&client->ports_lock ->snd_card_mutex ->(kmod_concurrent_max).lock ->&obj_hash[i].lock ->&x->wait#17 ->&pool->lock ->&rq->__lock ->running_helpers_waitq.lock ->autoload_work ->&x->wait#10 FD: 1 BD: 2 ....: register_lock#3 FD: 140 BD: 1 ++++: &card->controls_rwsem ->&xa->xa_lock#11 ->fs_reclaim ->&card->ctl_files_rwlock ->snd_ctl_layer_rwsem ->pool_lock#2 ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 9 BD: 2 +.+.: &xa->xa_lock#11 ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&zone->lock FD: 1 BD: 2 ....: &card->ctl_files_rwlock FD: 4 BD: 3 +.+.: autoload_work ->&k->list_lock ->&k->k_lock FD: 1 BD: 3 +.+.: snd_ctl_led_mutex FD: 1 BD: 1 +.+.: register_mutex#5 FD: 1 BD: 67 +.+.: failover_lock FD: 6 BD: 2 +...: llc_sap_list_lock ->pool_lock#2 ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 77 BD: 1 +.+.: act_id_mutex ->fs_reclaim ->pool_lock#2 FD: 1 BD: 64 ++++: act_mod_lock FD: 1 BD: 1 +.+.: ife_mod_lock FD: 1 BD: 66 +.+.: nf_connlabels_lock FD: 1 BD: 64 
++++: cls_mod_lock FD: 1 BD: 1 +.+.: ematch_mod_lock FD: 333 BD: 2 +.+.: sock_diag_table_mutex ->nlk_cb_mutex-SOCK_DIAG ->rlock-AF_NETLINK FD: 1 BD: 1 +.+.: nfnl_subsys_acct FD: 1 BD: 1 +.+.: nfnl_subsys_queue FD: 1 BD: 1 +.+.: nfnl_subsys_ulog FD: 28 BD: 5 +.+.: nf_log_mutex ->&rq->__lock FD: 1 BD: 1 +.+.: nfnl_subsys_osf FD: 35 BD: 1 +.+.: nf_sockopt_mutex ->&rq->__lock ->nf_sockopt_mutex.wait_lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->pgd_lock ->stock_lock ->key ->pcpu_lock ->percpu_counters_lock ->rcu_node_0 FD: 85 BD: 2 +.+.: nfnl_subsys_ctnetlink ->(console_sem).lock ->console_owner_lock ->console_owner ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->nlk_cb_mutex-NETFILTER FD: 1 BD: 1 +.+.: nfnl_subsys_ctnetlink_exp FD: 1 BD: 5 +.+.: nf_ct_ecache_mutex FD: 1 BD: 1 +.+.: nfnl_subsys_cttimeout FD: 1 BD: 1 +.+.: nfnl_subsys_cthelper FD: 1 BD: 1 +.+.: nf_ct_helper_mutex FD: 1 BD: 1 +...: nf_conntrack_expect_lock FD: 36 BD: 3 +.+.: nf_conntrack_mutex ->&nf_conntrack_locks[i] ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->nf_conntrack_mutex.wait_lock ->&pool->lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 1 +.+.: nf_ct_nat_helpers_mutex FD: 1 BD: 1 +.+.: nfnl_subsys_nftables FD: 1 BD: 1 +.+.: nfnl_subsys_nftcompat FD: 883 BD: 1 +.+.: masq_mutex ->pernet_ops_rwsem ->(inetaddr_chain).rwsem ->inet6addr_chain.lock FD: 207 BD: 5 +.+.: &xt[i].mutex ->fs_reclaim ->pool_lock#2 ->&c->lock ->&n->list_lock ->&____s->seqcount ->&mm->mmap_lock ->free_vmap_area_lock ->vmap_area_lock ->&per_cpu(xt_recseq, i) ->&obj_hash[i].lock ->purge_vmap_area_lock ->&____s->seqcount#2 ->&rq->__lock ->init_mm.page_table_lock ->remove_cache_srcu ->rcu_node_0 ->&lock->wait_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&base->lock ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq ->quarantine_lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->stock_lock FD: 31 BD: 3942 +.+.: &tn->lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq FD: 3 BD: 1 +.+.: subsys mutex#77 ->&k->k_lock FD: 85 BD: 5 +.+.: nfnl_subsys_ipset ->fs_reclaim ->&c->lock ->pool_lock#2 ->stock_lock ->crngs.lock ->rcu_state.barrier_mutex ->ip_set_ref_lock ->&obj_hash[i].lock ->&lock->wait_lock ->&rq->__lock FD: 1 BD: 1 +.+.: ip_set_type_mutex FD: 91 BD: 68 +.+.: ipvs->est_mutex ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->pcpu_lock ->&obj_hash[i].lock ->&n->list_lock ->&rq->__lock ->pcpu_alloc_mutex.wait_lock ->&p->pi_lock ->kthread_create_lock ->&x->wait ->&pool->lock ->(console_sem).lock FD: 1 BD: 66 +.+.: ip_vs_sched_mutex FD: 77 BD: 5 +.+.: __ip_vs_app_mutex ->fs_reclaim ->pool_lock#2 ->&c->lock ->&n->list_lock ->&obj_hash[i].lock ->quarantine_lock ->&rq->__lock ->&____s->seqcount#2 ->&____s->seqcount FD: 1 BD: 1 +.+.: ip_vs_pe_mutex FD: 1 BD: 1 +.+.: tunnel4_mutex FD: 1 BD: 1 +.+.: xfrm4_protocol_mutex FD: 329 BD: 4 +.+.: inet_diag_table_mutex ->clock-AF_INET6 ->slock-AF_INET6 ->sk_lock-AF_INET6 ->&rq->__lock ->rcu_node_0 FD: 1 BD: 1 +...: xfrm_km_lock FD: 1 BD: 1 +...: xfrm_translator_lock FD: 1 BD: 1 +.+.: xfrm6_protocol_mutex FD: 1 BD: 1 +.+.: tunnel6_mutex FD: 1 BD: 1 +.+.: xfrm_if_cb_lock FD: 1 BD: 1 +...: inetsw6_lock FD: 1 BD: 7 +.+.: &hashinfo->lock#2 FD: 19 BD: 5 +.+.: &net->ipv6.ip6addrlbl_table.lock ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock FD: 158 BD: 3887 +.+.: &idev->mc_lock ->fs_reclaim ->&c->lock ->&____s->seqcount 
->pool_lock#2 ->&obj_hash[i].lock ->&dev_addr_list_lock_key ->_xmit_ETHER ->&zone->lock ->batched_entropy_u32.lock ->&base->lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&n->list_lock ->krc.lock ->&bridge_netdev_addr_lock_key ->&dev_addr_list_lock_key#2 ->&batadv_netdev_addr_lock_key ->&rq->__lock ->&vlan_netdev_addr_lock_key ->&macvlan_netdev_addr_lock_key ->&dev_addr_list_lock_key#3 ->remove_cache_srcu ->&bridge_netdev_addr_lock_key/1 ->&dev_addr_list_lock_key/1 ->pcpu_lock ->&dev_addr_list_lock_key#2/1 ->rcu_node_0 ->_xmit_ETHER/1 ->&batadv_netdev_addr_lock_key/1 ->&vlan_netdev_addr_lock_key/1 ->&macvlan_netdev_addr_lock_key/1 ->&pool->lock ->&dev_addr_list_lock_key#3/1 ->&macsec_netdev_addr_lock_key/1 ->&rcu_state.expedited_wq ->&____s->seqcount#2 ->&cfs_rq->removed.lock ->_xmit_ETHER/2 ->_xmit_IPGRE ->&dev_addr_list_lock_key#3/2 ->&lock->wait_lock ->pgd_lock ->stock_lock ->key ->percpu_counters_lock ->&macsec_netdev_addr_lock_key#2/2 FD: 19 BD: 3888 +...: &dev_addr_list_lock_key ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&n->list_lock ->&____s->seqcount#2 ->&obj_hash[i].lock ->krc.lock ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 41 BD: 3902 +...: _xmit_ETHER ->&c->lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&local->filter_lock ->&n->list_lock ->(console_sem).lock ->&____s->seqcount#2 ->&obj_hash[i].lock ->krc.lock FD: 747 BD: 1 +.+.: (wq_completion)ipv6_addrconf ->(work_completion)(&(&net->ipv6.addr_chk_work)->work) ->(work_completion)(&(&ifa->dad_work)->work) ->&rq->__lock FD: 745 BD: 6 +.+.: (work_completion)(&(&net->ipv6.addr_chk_work)->work) ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock FD: 29 BD: 71 ....: &x->wait#21 ->&p->pi_lock FD: 50 BD: 3986 ++--: &ndev->lock ->&ifa->lock ->pool_lock#2 ->&dir->lock#2 ->pcpu_lock ->&obj_hash[i].lock ->&tb->tb6_lock ->&c->lock ->&____s->seqcount ->&n->list_lock ->batched_entropy_u32.lock ->&base->lock ->&____s->seqcount#2 FD: 7 BD: 1 +.+.: stp_proto_mutex ->llc_sap_list_lock FD: 1 BD: 1 ....: switchdev_notif_chain.lock FD: 28 BD: 64 ++++: (switchdev_blocking_notif_chain).rwsem ->&rq->__lock FD: 745 BD: 1 +.+.: br_ioctl_mutex ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock FD: 268 BD: 8 +.+.: nf_ct_proto_mutex ->defrag4_mutex ->nf_hook_mutex ->cpu_hotplug_lock ->&obj_hash[i].lock ->pool_lock#2 ->defrag6_mutex ->nf_hook_mutex.wait_lock ->&p->pi_lock ->&rq->__lock FD: 206 BD: 5 +.+.: ebt_mutex ->fs_reclaim ->pool_lock#2 ->&mm->mmap_lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&rq->__lock ->ebt_mutex.wait_lock FD: 1 BD: 1 +.+.: dsa_tag_drivers_lock FD: 1 BD: 1 +...: protocol_list_lock FD: 1 BD: 1 +...: linkfail_lock FD: 1 BD: 1 +...: rose_neigh_list_lock FD: 1 BD: 1 +.+.: proto_tab_lock#2 FD: 1 BD: 24 ++++: chan_list_lock FD: 1 BD: 4 +.+.: l2cap_sk_list.lock FD: 3 BD: 3 +.+.: sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP ->slock-AF_BLUETOOTH-BTPROTO_L2CAP ->chan_list_lock FD: 1 BD: 6 +...: slock-AF_BLUETOOTH-BTPROTO_L2CAP FD: 1 BD: 1 ....: rfcomm_wq.lock FD: 1 BD: 1 +.+.: rfcomm_mutex FD: 1 BD: 1 +.+.: auth_domain_lock FD: 1 BD: 1 +.+.: registered_mechs_lock FD: 1 BD: 1 ....: atm_dev_notify_chain.lock FD: 1 BD: 1 +.+.: proto_tab_lock#3 FD: 747 BD: 1 +.+.: vlan_ioctl_mutex ->&mm->mmap_lock ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->vlan_ioctl_mutex.wait_lock ->&rq->__lock ->rcu_state.barrier_mutex ->dev_base_lock ->lweventlist_lock ->pcpu_lock ->pool_lock#2 ->&dir->lock#2 ->&obj_hash[i].lock ->krc.lock ->netdev_unregistering_wq.lock ->&cfs_rq->removed.lock ->&meta->lock ->kfence_freelist_lock FD: 1 
BD: 1 +.+.: rds_info_lock FD: 87 BD: 7 ++++: rds_trans_sem ->(console_sem).lock ->&rq->__lock ->fs_reclaim ->&c->lock ->pool_lock#2 ->crngs.lock ->&id_priv->handler_mutex ->id_table_lock ->&x->wait#29 ->&obj_hash[i].lock FD: 1 BD: 71 ....: &id_priv->lock FD: 2 BD: 67 +.+.: &xa->xa_lock#12 ->pool_lock#2 FD: 130 BD: 78 +.+.: k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->&tcp_hashinfo.bhash[i].lock ->&h->lhash2[i].lock ->&table->hash[i].lock ->pool_lock#2 ->&____s->seqcount#8 ->&rq->__lock ->batched_entropy_u32.lock ->tk_core.seq.seqcount ->fs_reclaim ->&c->lock ->&obj_hash[i].lock ->&base->lock ->k-clock-AF_INET6 ->&queue->rskq_lock ->(kmod_concurrent_max).lock ->&n->list_lock ->&x->wait#17 ->running_helpers_waitq.lock ->&cfs_rq->removed.lock ->clock-AF_INET6 ->&dir->lock ->&____s->seqcount ->l2tp_ip6_lock FD: 70 BD: 81 +.-.: k-slock-AF_INET6 ->pool_lock#2 ->tk_core.seq.seqcount ->&c->lock ->&obj_hash[i].lock ->&tcp_hashinfo.bhash[i].lock ->elock-AF_INET6 ->&base->lock ->krc.lock ->&n->list_lock ->&____s->seqcount ->&hashinfo->ehash_locks[i] ->(&req->rsk_timer) ->&queue->rskq_lock ->k-clock-AF_INET6 ->key#25 ->crngs.lock ->&____s->seqcount#2 FD: 33 BD: 107 ++.-: k-clock-AF_INET6 FD: 37 BD: 104 +.-.: &tcp_hashinfo.bhash[i].lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&tcp_hashinfo.bhash2[i].lock ->k-clock-AF_INET6 ->clock-AF_INET ->clock-AF_INET6 ->stock_lock ->&obj_hash[i].lock ->k-clock-AF_INET ->&____s->seqcount#2 ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 36 BD: 105 +.-.: &tcp_hashinfo.bhash2[i].lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->k-clock-AF_INET6 ->clock-AF_INET ->clock-AF_INET6 ->&obj_hash[i].lock ->batched_entropy_u8.lock ->&hashinfo->ehash_locks[i] ->stock_lock ->once_lock ->k-clock-AF_INET ->&____s->seqcount#2 FD: 1 BD: 85 +.+.: &h->lhash2[i].lock FD: 1 BD: 5 +...: &list->lock#4 FD: 1 BD: 6 +...: k-clock-AF_TIPC FD: 36 BD: 5 +.+.: k-sk_lock-AF_TIPC ->k-slock-AF_TIPC ->&tn->nametbl_lock ->&rq->__lock ->&obj_hash[i].lock ->k-clock-AF_TIPC FD: 1 BD: 6 +...: k-slock-AF_TIPC FD: 21 BD: 6 +...: &tn->nametbl_lock ->pool_lock#2 ->&service->lock ->&c->lock ->&nt->cluster_scope_lock ->&obj_hash[i].lock ->krc.lock ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock FD: 19 BD: 7 +...: &service->lock ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock ->krc.lock FD: 28 BD: 70 +.+.: &pnettable->lock ->&rq->__lock FD: 28 BD: 70 +.+.: smc_ib_devices.mutex ->&rq->__lock FD: 1 BD: 1 +.+.: smc_wr_rx_hash_lock FD: 1 BD: 1 +.+.: v9fs_trans_lock FD: 1 BD: 5 +...: &this->receive_lock FD: 1 BD: 1 +...: lowpan_nhc_lock FD: 284 BD: 7 +.+.: ovs_mutex ->(work_completion)(&data->gc_work) ->nf_ct_proto_mutex ->&obj_hash[i].lock ->pool_lock#2 ->nf_connlabels_lock ->net_rwsem ->&rq->__lock FD: 266 BD: 9 +.+.: defrag4_mutex ->nf_hook_mutex ->cpu_hotplug_lock ->&obj_hash[i].lock ->pool_lock#2 ->&rq->__lock FD: 266 BD: 9 +.+.: defrag6_mutex ->nf_hook_mutex ->cpu_hotplug_lock ->&obj_hash[i].lock ->pool_lock#2 ->&rq->__lock FD: 1 BD: 110 +.+.: subsys mutex#78 FD: 31 BD: 1 ..-.: &(&gc_work->dwork)->timer FD: 38 BD: 2 +.+.: (work_completion)(&(&gc_work->dwork)->work) ->rcu_node_0 ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->&base->lock ->&rcu_state.expedited_wq FD: 1 BD: 3967 ...-: &____s->seqcount#7 FD: 31 BD: 1 ..-.: &(&ipvs->defense_work)->timer FD: 33 BD: 6 +.+.: (work_completion)(&(&ipvs->defense_work)->work) ->&s->s_inode_list_lock ->&ipvs->dropentry_lock ->&ipvs->droppacket_lock ->&ipvs->securetcp_lock ->&obj_hash[i].lock ->&base->lock ->&rq->__lock 
->rcu_node_0 FD: 1 BD: 7 +...: &ipvs->dropentry_lock FD: 1 BD: 7 +...: &ipvs->droppacket_lock FD: 1 BD: 7 +...: &ipvs->securetcp_lock FD: 14 BD: 5 +.-.: (&net->can.stattimer) ->&obj_hash[i].lock ->&base->lock FD: 21 BD: 1 +.-.: (&vblank->disable_timer) ->&dev->vbl_lock FD: 38 BD: 2 +.+.: drain_vmap_work ->vmap_purge_lock FD: 9 BD: 221 +...: map_idr_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 1 BD: 1 ....: rcu_read_lock_sched FD: 12 BD: 221 +...: prog_idr_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 1 BD: 2 +...: bpf_lock FD: 1 BD: 1 ....: rcu_read_lock_trace FD: 9 BD: 221 +...: btf_idr_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock FD: 1 BD: 1 +.+.: &map->freeze_mutex FD: 1 BD: 6 +.+.: ima_keys_lock FD: 80 BD: 109 +.+.: scomp_lock ->fs_reclaim ->&c->lock ->&n->list_lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->init_mm.page_table_lock FD: 4 BD: 1 +.+.: pcpu_drain_mutex ->&pcp->lock FD: 349 BD: 5 +.+.: k-sk_lock-AF_RXRPC ->k-slock-AF_RXRPC ->&rxnet->local_mutex ->&local->services_lock ->fs_reclaim ->pool_lock#2 ->&c->lock ->&rx->incoming_lock ->&obj_hash[i].lock ->&rxnet->conn_lock ->&rq->__lock ->&call->waitq ->(rxrpc_call_limiter).lock ->&rx->recvmsg_lock ->&rx->call_lock ->&rxnet->call_lock ->(&call->timer) ->&base->lock ->&list->lock#23 ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock FD: 1 BD: 6 +...: k-slock-AF_RXRPC FD: 336 BD: 7 +.+.: &rxnet->local_mutex ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->crngs.lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#8 ->&____s->seqcount ->&c->lock ->&dir->lock ->k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->cpu_hotplug_lock ->&rq->__lock ->kthread_create_lock ->&p->pi_lock ->&x->wait ->&zone->lock ->&x->wait#22 ->&n->list_lock ->stock_lock ->k-sk_lock-AF_INET ->k-slock-AF_INET ->&____s->seqcount#2 ->&table->hash[i].lock ->k-clock-AF_INET6 ->&xa->xa_lock#7 ->&fsnotify_mark_srcu FD: 13 BD: 84 +...: &table->hash[i].lock ->k-clock-AF_INET6 ->&table->hash2[i].lock ->k-clock-AF_INET ->clock-AF_INET ->clock-AF_INET6 FD: 1 BD: 85 +...: &table->hash2[i].lock FD: 266 BD: 2 +.+.: netstamp_work ->cpu_hotplug_lock FD: 29 BD: 8 ....: &x->wait#22 ->&p->pi_lock FD: 1 BD: 6 +.+.: &local->services_lock FD: 1 BD: 10 +.+.: &rxnet->conn_lock FD: 1 BD: 6 ....: &call->waitq FD: 1 BD: 6 +.+.: &rx->call_lock FD: 1 BD: 6 +.+.: &rxnet->call_lock FD: 33 BD: 5 +.-.: (&rxnet->peer_keepalive_timer) FD: 18 BD: 7 +.+.: (wq_completion)krxrpcd ->(work_completion)(&rxnet->peer_keepalive_work) ->(work_completion)(&rxnet->service_conn_reaper) FD: 15 BD: 8 +.+.: (work_completion)(&rxnet->peer_keepalive_work) ->&rxnet->peer_hash_lock ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 9 +.+.: &rxnet->peer_hash_lock FD: 85 BD: 1 +.+.: init_user_ns.keyring_sem ->key_user_lock ->root_key_user.lock ->fs_reclaim ->pool_lock#2 ->crngs.lock ->key_serial_lock ->key_construction_mutex ->&type->lock_class ->keyring_serialise_link_lock FD: 1 BD: 5 +.+.: root_key_user.lock FD: 1 BD: 6 +.+.: keyring_name_lock FD: 1 BD: 1 +.+.: template_list FD: 1 BD: 1 +.+.: idr_lock FD: 76 BD: 9 +.+.: ima_extend_list_mutex ->fs_reclaim ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&n->list_lock ->&rq->__lock ->&cfs_rq->removed.lock ->rcu_node_0 ->ima_extend_list_mutex.wait_lock ->&____s->seqcount#2 ->remove_cache_srcu FD: 1 BD: 1 +.+.: clk_debug_lock FD: 29 BD: 4 +.+.: 
deferred_probe_work ->deferred_probe_mutex FD: 80 BD: 64 ++++: &(&net->nexthop.notifier_chain)->rwsem ->&data->nh_lock FD: 136 BD: 72 +.+.: k-sk_lock-AF_INET ->k-slock-AF_INET ->&table->hash[i].lock ->&rq->__lock ->&____s->seqcount#8 ->batched_entropy_u32.lock ->&obj_hash[i].lock ->k-clock-AF_INET ->&tcp_hashinfo.bhash[i].lock ->&h->lhash2[i].lock ->pool_lock#2 ->stock_lock ->&hashinfo->ehash_locks[i] ->tk_core.seq.seqcount ->batched_entropy_u16.lock ->fs_reclaim ->&____s->seqcount ->rcu_node_0 ->&base->lock ->&c->lock ->slock-AF_INET ->&n->list_lock ->&____s->seqcount#2 ->(&tw->tw_timer) ->&cfs_rq->removed.lock ->remove_cache_srcu ->&rcu_state.expedited_wq ->batched_entropy_u8.lock ->kfence_freelist_lock ->quarantine_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&token_hash[i].lock ->crngs.lock FD: 54 BD: 76 +.-.: k-slock-AF_INET ->pool_lock#2 ->&c->lock ->batched_entropy_u32.lock ->&obj_hash[i].lock ->tk_core.seq.seqcount ->elock-AF_INET ->krc.lock ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock ->&tcp_hashinfo.bhash[i].lock ->key#25 FD: 1 BD: 107 ++..: k-clock-AF_INET FD: 745 BD: 2 +.+.: reg_work ->rtnl_mutex FD: 1 BD: 1 +.+.: system_transition_mutex/1 FD: 1 BD: 64 +...: reg_pending_beacons_lock FD: 1 BD: 1 +.+.: acpi_gpio_deferred_req_irqs_lock FD: 759 BD: 2 +.+.: (work_completion)(&fw_work->work) ->fs_reclaim ->pool_lock#2 ->&fw_cache.lock ->tk_core.seq.seqcount ->async_lock ->init_task.alloc_lock ->&dentry->d_lock ->&sb->s_type->i_mutex_key ->&obj_hash[i].lock ->&base->lock ->&c->lock ->&zone->lock ->&____s->seqcount ->(console_sem).lock ->console_owner_lock ->console_owner ->umhelper_sem ->fw_lock ->rtnl_mutex FD: 2 BD: 3 +.+.: &fw_cache.lock ->pool_lock#2 FD: 1 BD: 1 +.+.: prepare_lock FD: 31 BD: 1 ..-.: fs/file_table.c:368 FD: 3 BD: 4 +.+.: subsys mutex#79 ->&k->k_lock FD: 2 BD: 11 +.+.: fw_lock ->&x->wait#23 FD: 1 BD: 12 ....: &x->wait#23 FD: 4 BD: 2 +.+.: (delayed_fput_work).work ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 1 +.+.: cdev_lock FD: 336 BD: 2 +.+.: &tty->legacy_mutex ->&tty->read_wait ->&tty->write_wait ->&tty->ldisc_sem ->&tty->files_lock ->&port->lock ->&port->mutex ->&port_lock_key ->tasklist_lock ->&tty->ctrl.lock ->&f->f_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 7 ....: &tty->read_wait FD: 29 BD: 4090 -...: &tty->write_wait ->&p->pi_lock FD: 321 BD: 3 ++++: &tty->ldisc_sem ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&zone->lock ->&____s->seqcount ->init_mm.page_table_lock ->&tty->write_wait ->&tty->read_wait ->&tty->termios_rwsem ->&mm->mmap_lock ->&port_lock_key ->&port->lock ->&tty->flow.lock ->&ldata->atomic_read_lock FD: 235 BD: 6 ++++: &tty->termios_rwsem ->&port->mutex ->&tty->write_wait ->&tty->read_wait ->&ldata->output_lock ->&port_lock_key FD: 1 BD: 3 +.+.: &tty->files_lock FD: 1 BD: 4090 -...: &port->lock FD: 77 BD: 10 +.+.: hash_mutex ->fs_reclaim ->pool_lock#2 FD: 32 BD: 10 -...: &i->lock ->&port_lock_key FD: 1 BD: 1 +.+.: detected_devices_mutex FD: 1 BD: 424 ....: &wq#2 FD: 380 BD: 1 +.+.: &bdev->bd_fsfreeze_mutex ->sb_lock ->fs_reclaim ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&type->s_umount_key#25/1 ->&type->s_umount_key#26/1 ->&zone->lock ->&type->s_umount_key#27/1 ->&type->s_umount_key#28/1 FD: 134 BD: 2 +.+.: &type->s_umount_key#25/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&obj_hash[i].lock ->&wq->mutex ->kthread_create_lock ->&p->pi_lock ->&rq->__lock 
->&x->wait ->wq_pool_mutex ->mmu_notifier_invalidate_range_start ->&xa->xa_lock#7 ->lock#4 ->&mapping->private_lock ->tk_core.seq.seqcount ->&dd->lock ->bit_wait_table + i ->wq_mayday_lock ->&cfs_rq->removed.lock ->&sbi->old_work_lock ->(work_completion)(&(&sbi->old_work)->work) FD: 29 BD: 343 ..-.: bit_wait_table + i ->&p->pi_lock FD: 172 BD: 5 ++++: &type->i_mutex_dir_key#3 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->rename_lock.seqcount ->&ei->i_es_lock ->&ei->i_data_sem ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->&xa->xa_lock#7 ->lock#4 ->&c->lock ->&zone->lock ->&mapping->private_lock ->tk_core.seq.seqcount ->&dd->lock ->&obj_hash[i].lock ->bit_wait_table + i ->&rq->__lock ->inode_hash_lock ->&journal->j_state_lock ->&sb->s_type->i_lock_key#22 ->namespace_sem ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->tomoyo_ss ->&s->s_inode_list_lock ->&ei->xattr_sem ->jbd2_handle ->&n->list_lock ->&____s->seqcount#2 ->stock_lock ->remove_cache_srcu ->rcu_node_0 ->&sem->wait_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock ->&journal->j_wait_transaction_locked FD: 1 BD: 3 +.+.: &sbi->old_work_lock FD: 1 BD: 3 +.+.: (work_completion)(&(&sbi->old_work)->work) FD: 119 BD: 2 +.+.: &type->s_umount_key#26/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->&xa->xa_lock#7 ->lock#4 ->&mapping->private_lock ->tk_core.seq.seqcount ->&dd->lock ->&obj_hash[i].lock ->bit_wait_table + i ->&rq->__lock ->&sb->s_type->i_lock_key#3 ->lock#5 ->&lruvec->lru_lock ->&zone->lock ->crypto_alg_sem FD: 119 BD: 2 +.+.: &type->s_umount_key#27/1 ->fs_reclaim ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->&xa->xa_lock#7 ->lock#4 ->&mapping->private_lock ->tk_core.seq.seqcount ->&dd->lock ->&obj_hash[i].lock ->bit_wait_table + i ->&rq->__lock ->&sb->s_type->i_lock_key#3 ->lock#5 ->&lruvec->lru_lock ->&zone->lock ->crypto_alg_sem ->quarantine_lock FD: 35 BD: 116 +.+.: &bgl->locks[i].lock ->&sbi->s_md_lock ->&obj_hash[i].lock ->pool_lock#2 ->&ei->i_prealloc_lock ->&pa->pa_lock ->&lg->lg_prealloc_lock ->&pa->pa_lock#2 ->&meta->lock ->kfence_freelist_lock FD: 56 BD: 337 +.+.: &sb->s_type->i_lock_key#22 ->&dentry->d_lock ->&lru->node[i].lock ->&xa->xa_lock#7 ->bit_wait_table + i FD: 228 BD: 7 ++++: &sb->s_type->i_mutex_key#8 ->&ei->i_es_lock ->&ei->i_data_sem ->mmu_notifier_invalidate_range_start ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->integrity_iint_lock ->&rq->__lock ->remove_cache_srcu ->tk_core.seq.seqcount ->&ei->xattr_sem ->fs_reclaim ->&xa->xa_lock#7 ->lock#4 ->&mapping->private_lock ->&sb->s_type->i_lock_key#22 ->&wb->list_lock ->&journal->j_state_lock ->jbd2_handle ->&obj_hash[i].lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->rcu_node_0 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->quarantine_lock ->mapping.invalidate_lock ->free_vmap_area_lock ->vmap_area_lock ->init_mm.page_table_lock ->swap_cgroup_mutex ->&base->lock ->&fq->mq_flush_lock ->&x->wait#26 ->(&timer.timer) ->swapon_mutex ->proc_poll_wait.lock ->&dentry->d_lock ->stock_lock ->&____s->seqcount#2 ->&n->list_lock ->&mm->mmap_lock ->ima_extend_list_mutex ->&p->alloc_lock ->&list->lock ->kauditd_wait.lock ->&rcu_state.expedited_wq ->&sem->wait_lock ->&p->pi_lock ->&sbi->s_writepages_rwsem ->&folio_wait_table[i] ->lock#5 ->&dd->lock ->&journal->j_wait_commit 
->&journal->j_wait_done_commit ->key#3 ->key#15 ->&cfs_rq->removed.lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&sem->waiters ->&rsp->gp_wait ->bit_wait_table + i ->&journal->j_wait_transaction_locked ->&sb->s_type->i_mutex_key#8/4 FD: 20 BD: 163 ++++: &ei->i_es_lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&sbi->s_es_lock ->&obj_hash[i].lock ->key#2 ->key#6 ->key#7 ->key#8 ->&____s->seqcount#2 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&n->list_lock ->quarantine_lock FD: 116 BD: 116 ++++: &ei->i_data_sem ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&ei->i_es_lock ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount ->&ei->i_prealloc_lock ->quarantine_lock ->&sb->s_type->i_lock_key#22 ->&(ei->i_block_reservation_lock) ->&rq->__lock ->&ei->i_raw_lock ->&wb->list_lock ->&xa->xa_lock#7 ->lock#4 ->&mapping->private_lock ->&ret->b_state_lock ->&journal->j_revoke_lock ->key#15 ->&sbi->s_md_lock ->key#3 ->&lg->lg_mutex ->&wb->work_lock ->&____s->seqcount#2 ->&n->list_lock ->&pa->pa_lock#2 ->rcu_node_0 ->remove_cache_srcu ->stock_lock ->&sem->wait_lock ->&journal->j_state_lock ->bit_wait_table + i ->&bgl->locks[i].lock ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&journal->j_wait_updates ->&p->pi_lock ->&dd->lock ->&ei->i_data_sem/1 FD: 1 BD: 164 +.+.: &sbi->s_es_lock FD: 77 BD: 116 ++++: &journal->j_state_lock ->&journal->j_wait_done_commit ->&journal->j_wait_commit ->tk_core.seq.seqcount ->&obj_hash[i].lock ->&base->lock ->&journal->j_wait_updates ->&journal->j_wait_transaction_locked ->&journal->j_list_lock ->&journal->j_wait_reserved FD: 29 BD: 117 ....: &journal->j_wait_done_commit ->&p->pi_lock FD: 1 BD: 4 +.+.: &sbi->s_error_lock FD: 34 BD: 116 ..-.: &fq->mq_flush_lock ->&q->requeue_lock ->&obj_hash[i].lock ->tk_core.seq.seqcount ->bit_wait_table + i ->&x->wait#26 FD: 1 BD: 123 ..-.: &q->requeue_lock FD: 29 BD: 117 ....: &journal->j_wait_commit ->&p->pi_lock FD: 125 BD: 3 +.+.: ext4_grpinfo_slab_create_mutex ->slab_mutex FD: 81 BD: 4 +.+.: ext4_li_mtx ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock ->batched_entropy_u16.lock ->&eli->li_list_mtx ->kthread_create_lock ->&p->pi_lock ->&rq->__lock ->&x->wait FD: 1 BD: 1 ....: &rs->lock FD: 18 BD: 2 +.+.: (work_completion)(&s->destroy_work) ->&rsp->gp_wait ->pcpu_lock ->&obj_hash[i].lock ->&base->lock FD: 43 BD: 67 +.+.: rcu_state.barrier_mutex ->rcu_state.barrier_lock ->&x->wait#24 ->&rq->__lock ->rcu_state.barrier_mutex.wait_lock ->&pool->lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq ->pgd_lock ->stock_lock ->key ->pcpu_lock ->percpu_counters_lock ->pool_lock FD: 29 BD: 68 ..-.: &x->wait#24 ->&p->pi_lock FD: 1 BD: 1 +.+.: (init_mm).mmap_lock FD: 30 BD: 1 +.-.: (&cb->timer) ->&obj_hash[i].lock ->&base->lock ->tk_core.seq.seqcount ->&rq_wait->wait FD: 136 BD: 1 +.+.: &type->s_umount_key#29/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&zone->lock ->&____s->seqcount ->&c->lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#23 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_mutex_key#9 ->&dentry->d_lock FD: 42 BD: 148 +.+.: &sb->s_type->i_lock_key#23 ->&dentry->d_lock ->bit_wait_table + i FD: 115 BD: 4 ++++: &sb->s_type->i_mutex_key#9 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock 
->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#23 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->rename_lock.seqcount ->proc_subdir_lock ->sysctl_lock ->&obj_hash[i].lock ->&____s->seqcount ->&c->lock ->&p->alloc_lock ->&pid->lock ->namespace_sem ->tomoyo_ss ->&n->list_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&rq->__lock ->&xa->xa_lock#3 ->stock_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&____s->seqcount#2 ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&rcu_state.gp_wq ->remove_cache_srcu ->&cfs_rq->removed.lock FD: 216 BD: 2 .+.+: sb_writers#3 ->mount_lock ->&sb->s_type->i_mutex_key#9 ->sysctl_lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&h->resize_lock ->hugetlb_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#23 ->&wb->list_lock ->&dentry->d_lock ->tomoyo_ss ->&mm->mmap_lock ->oom_adj_mutex ->&c->lock ->&p->pi_lock ->&rq->__lock ->remove_cache_srcu ->&n->list_lock ->&____s->seqcount#11 ->&(&net->ipv4.ping_group_range.lock)->lock ->rcu_node_0 ->(console_sem).lock ->console_owner_lock ->console_owner ->oom_adj_mutex.wait_lock ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq FD: 79 BD: 3 +.+.: &h->resize_lock ->free_hpage_work ->hugetlb_lock ->fs_reclaim ->&____s->seqcount ->pool_lock#2 FD: 1 BD: 4 +.+.: free_hpage_work FD: 2 BD: 101 ....: hugetlb_lock ->&____s->seqcount#2 FD: 155 BD: 105 ++++: mapping.invalidate_lock ->mmu_notifier_invalidate_range_start ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&xa->xa_lock#7 ->lock#4 ->&ei->i_es_lock ->&ei->i_data_sem ->tk_core.seq.seqcount ->&dd->lock ->&obj_hash[i].lock ->&c->lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&mapping->i_mmap_rwsem ->&journal->j_state_lock ->jbd2_handle ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&rq->__lock ->rcu_node_0 ->&mapping->private_lock ->stock_lock ->&sb->s_type->i_lock_key#22 ->lock#5 ->&lruvec->lru_lock ->&rcu_state.expedited_wq ->&meta->lock ->&____s->seqcount#2 ->&folio_wait_table[i] ->remove_cache_srcu ->&cfs_rq->removed.lock ->&journal->j_wait_transaction_locked ->quarantine_lock ->&ei->i_data_sem/1 FD: 31 BD: 1 ..-.: &(&ovs_net->masks_rebalance)->timer FD: 285 BD: 6 +.+.: (work_completion)(&(&ovs_net->masks_rebalance)->work) ->ovs_mutex ->&obj_hash[i].lock ->&base->lock ->&rq->__lock FD: 1 BD: 101 ++++: integrity_iint_lock FD: 182 BD: 4 +.+.: &iint->mutex ->&ei->xattr_sem ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->mmu_notifier_invalidate_range_start ->ima_extend_list_mutex ->mapping.invalidate_lock ->&folio_wait_table[i] ->&rq->__lock ->&c->lock ->&zone->lock ->&____s->seqcount ->tk_core.seq.seqcount ->&lock->wait_lock ->&cfs_rq->removed.lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&n->list_lock ->remove_cache_srcu ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&____s->seqcount#2 ->&p->alloc_lock ->&list->lock ->kauditd_wait.lock ->rcu_node_0 ->&rcu_state.expedited_wq ->quarantine_lock ->stock_lock ->(console_sem).lock ->console_owner_lock ->console_owner ->ima_extend_list_mutex.wait_lock ->&p->pi_lock FD: 48 BD: 10 .+.+: &ei->xattr_sem ->&mapping->private_lock ->rcu_node_0 ->&rq->__lock FD: 1 BD: 4 ++++: entries_lock FD: 219 BD: 2 +.+.: &sig->exec_update_lock ->&p->alloc_lock ->&sighand->siglock ->&newf->file_lock ->batched_entropy_u64.lock ->&mm->mmap_lock ->delayed_uprobe_lock ->&memcg->mm_list.lock ->pgd_lock ->pool_lock#2 ->&obj_hash[i].lock ->key ->pcpu_lock ->percpu_counters_lock ->&rq->__lock ->pool_lock ->quarantine_lock ->&cfs_rq->removed.lock ->stock_lock FD: 1 BD: 158 +.+.: 
&memcg->mm_list.lock FD: 3 BD: 90 ..-.: batched_entropy_u16.lock ->crngs.lock FD: 29 BD: 4472 +.+.: ptlock_ptr(page)#2/1 FD: 103 BD: 1 +.+.: &type->s_umount_key#30 ->shrinker_rwsem ->&dentry->d_lock ->rename_lock.seqcount ->&dentry->d_lock/1 ->&sb->s_type->i_lock_key#23 ->&s->s_inode_list_lock ->&xa->xa_lock#7 ->sysctl_lock ->&obj_hash[i].lock ->pool_lock#2 ->&fsnotify_mark_srcu ->sb_lock FD: 42 BD: 2 +.+.: (work_completion)(&map->work) ->vmap_area_lock ->&obj_hash[i].lock ->purge_vmap_area_lock ->pool_lock#2 ->stock_lock ->&rq->__lock ->rcu_node_0 ->pcpu_lock ->&cfs_rq->removed.lock FD: 745 BD: 2 +.+.: (work_completion)(&aux->work) ->map_idr_lock ->&obj_hash[i].lock ->pool_lock#2 ->pack_mutex ->pcpu_lock ->vmap_area_lock ->purge_vmap_area_lock ->stock_lock ->&rq->__lock ->&base->lock ->quarantine_lock ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock FD: 1 BD: 164 ....: key#2 FD: 796 BD: 2 +.+.: &p->lock ->fs_reclaim ->pool_lock#2 ->&mm->mmap_lock ->file_systems_lock ->namespace_sem ->&c->lock ->&____s->seqcount ->&of->mutex ->remove_cache_srcu ->&n->list_lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq ->cpufreq_driver_lock ->module_mutex ->pgd_lock ->&obj_hash[i].lock ->key ->pcpu_lock ->percpu_counters_lock ->&cfs_rq->removed.lock ->&____s->seqcount#2 ->batched_entropy_u8.lock ->kfence_freelist_lock ->stock_lock FD: 120 BD: 1 +.+.: &type->s_umount_key#31/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->&c->lock ->&____s->seqcount ->list_lrus_mutex ->sb_lock ->&root->kernfs_rwsem ->&sb->s_type->i_lock_key#24 ->&root->kernfs_supers_rwsem ->&dentry->d_lock FD: 41 BD: 336 +.+.: &sb->s_type->i_lock_key#24 ->&dentry->d_lock FD: 126 BD: 3 ++++: &type->i_mutex_dir_key#4 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->rename_lock.seqcount ->&root->kernfs_rwsem ->&sb->s_type->i_lock_key#24 ->namespace_sem ->&____s->seqcount ->tk_core.seq.seqcount ->&c->lock ->&n->list_lock ->&rq->__lock ->remove_cache_srcu ->batched_entropy_u8.lock ->kfence_freelist_lock ->rcu_node_0 ->&obj_hash[i].lock ->&sem->wait_lock ->&p->pi_lock ->&____s->seqcount#2 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 29 BD: 158 ....: &x->wait#25 ->&p->pi_lock FD: 42 BD: 11 +.+.: &net->unx.table.locks[i] ->&net->unx.table.locks[i]/1 FD: 911 BD: 2 +.+.: &sb->s_type->i_mutex_key#10 ->&net->unx.table.locks[i] ->&u->lock ->&u->peer_wait ->rlock-AF_UNIX ->pool_lock#2 ->&dir->lock ->&obj_hash[i].lock ->&____s->seqcount ->rcu_node_0 ->&rq->__lock ->nl_table_lock ->nl_table_wait.lock ->clock-AF_NETLINK ->genl_sk_destructing_waitq.lock ->&nlk->wait ->wlock-AF_NETLINK ->(netlink_chain).rwsem ->tomoyo_ss ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#8 ->&wb->list_lock ->&dentry->d_lock ->sk_lock-AF_INET ->slock-AF_INET ->clock-AF_INET ->sk_lock-AF_INET6 ->slock-AF_INET6 ->clock-AF_INET6 ->&table->hash[i].lock ->&net->packet.sklist_lock ->&po->bind_lock ->sk_lock-AF_PACKET ->slock-AF_PACKET ->fanout_mutex ->&rnp->exp_wq[1] ->clock-AF_PACKET ->rlock-AF_PACKET ->pcpu_lock ->elock-AF_PACKET ->&rnp->exp_wq[3] ->&rnp->exp_wq[2] ->&cfs_rq->removed.lock ->sk_lock-AF_BLUETOOTH-BTPROTO_HCI ->slock-AF_BLUETOOTH-BTPROTO_HCI ->hci_dev_list_lock ->rlock-AF_BLUETOOTH ->wlock-AF_BLUETOOTH ->&c->lock ->quarantine_lock ->stock_lock ->clock-AF_RDS ->&rs->rs_recv_lock ->rds_cong_monitor_lock ->rds_cong_lock ->&rs->rs_lock ->&rs->rs_rdma_lock ->&q->lock ->rds_sock_lock ->&net->ipv4.ra_mutex ->&hashinfo->lock ->&rnp->exp_wq[0] ->sk_lock-AF_CAN ->slock-AF_CAN ->l2tp_ip6_lock ->clock-AF_RXRPC ->(wq_completion)krxrpcd 
->&wq->mutex ->rlock-AF_RXRPC ->&p->pi_lock ->&x->wait ->clock-AF_ROSE ->sk_lock-AF_ROSE ->slock-AF_ROSE ->wlock-AF_ROSE ->&list->lock#21 ->(work_completion)(&msk->work) ->sk_lock-AF_KCM ->slock-AF_KCM ->&mux->lock ->(work_completion)(&kcm->tx_work) ->&mux->rx_lock ->&knet->mutex ->rtnl_mutex ->&rcu_state.expedited_wq ->rtnl_mutex.wait_lock ->sk_lock-AF_INET/1 ->&net->sctp.addr_wq_lock ->&rnp->exp_lock ->rcu_state.exp_mutex ->sk_lock-AF_INET6/1 ->rcu_state.exp_mutex.wait_lock ->unix_gc_lock ->bcm_notifier_lock ->rlock-AF_CAN ->elock-AF_CAN ->l2tp_ip_lock ->sk_lock-AF_NFC ->slock-AF_NFC ->clock-AF_NFC ->rlock-AF_NFC ->&list->lock#26 ->&hashinfo->lock#2 ->krc.lock ->(work_completion)(&(&sw_ctx_tx->tx_work.work)->work) ->nfnl_grp_active_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->sk_lock-AF_VSOCK ->slock-AF_VSOCK ->rlock-AF_CAIF ->sk_lock-AF_CAIF ->slock-AF_CAIF ->elock-AF_CAIF ->&zone->lock ->&ping_table.lock ->raw_sk_list.lock ->&n->list_lock ->isotp_notifier_lock ->&meta->lock ->kfence_freelist_lock ->&bsd_socket_locks[i] ->sk_lock-AF_PPPOX ->slock-AF_PPPOX ->rlock-AF_PPPOX ->sk_lock-AF_TIPC ->slock-AF_TIPC ->&pnsocks.lock ->resource_mutex ->clock-AF_PHONET ->rlock-AF_PHONET ->l2cap_sk_list.lock ->sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP ->slock-AF_BLUETOOTH-BTPROTO_L2CAP ->&chan->lock/1 ->chan_list_lock ->sk_lock-AF_QIPCRTR ->slock-AF_QIPCRTR ->pgd_lock ->key ->percpu_counters_lock ->&net->xdp.lock ->&xs->map_list_lock ->&xs->mutex ->clock-AF_XDP ->dgram_lock ->clock-AF_IEEE802154 ->rlock-AF_IEEE802154 ->pfkey_mutex ->clock-AF_KEY ->wlock-AF_KEY ->rlock-AF_KEY ->wlock-AF_PPPOX ->&x->wait#10 ->(work_completion)(&smc->connect_work) ->sk_lock-AF_SMC ->slock-AF_SMC ->&smc->clcsock_release_lock ->raw_lock ->raw_notifier_lock ->sk_lock-AF_BLUETOOTH-BTPROTO_SCO ->slock-AF_BLUETOOTH-BTPROTO_SCO ->clock-AF_BLUETOOTH ->sco_sk_list.lock ->clock-AF_NETROM ->sk_lock-AF_NETROM ->slock-AF_NETROM ->sk_lock-AF_X25 ->slock-AF_X25 ->sk_lock-AF_AX25 ->slock-AF_AX25 ->sk_lock-AF_PHONET ->slock-AF_PHONET ->&list->lock#29 ->crypto_default_null_skcipher_lock FD: 52 BD: 7 +.+.: &u->lock ->clock-AF_UNIX ->&u->lock/1 ->&sk->sk_peer_lock ->rlock-AF_UNIX ->&u->peer_wait ->&ei->socket.wq.wait FD: 1 BD: 8 +...: clock-AF_UNIX FD: 34 BD: 8 +.+.: &u->peer_wait ->&p->pi_lock ->&ei->socket.wq.wait FD: 1 BD: 9 +.+.: rlock-AF_UNIX FD: 246 BD: 2 .+.+: sb_writers#4 ->mount_lock ->tk_core.seq.seqcount ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&journal->j_state_lock ->jbd2_handle ->&obj_hash[i].lock ->&sb->s_type->i_lock_key#22 ->&wb->list_lock ->&wb->work_lock ->&type->i_mutex_dir_key#3 ->&type->i_mutex_dir_key#3/1 ->&xa->xa_lock#7 ->lock#4 ->&mapping->private_lock ->&dd->lock ->bit_wait_table + i ->&rq->__lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->remove_cache_srcu ->&sb->s_type->i_mutex_key#8 ->tomoyo_ss ->&sem->wait_lock ->&p->pi_lock ->&s->s_inode_list_lock ->sb_internal ->inode_hash_lock ->&fsnotify_mark_srcu ->&____s->seqcount#2 ->integrity_iint_lock ->stock_lock ->lock#5 ->&lruvec->lru_lock ->&cfs_rq->removed.lock ->&dentry->d_lock ->&iint->mutex ->rcu_node_0 ->&journal->j_list_lock ->&rcu_state.expedited_wq ->&ei->xattr_sem ->quarantine_lock ->fs_reclaim ->mapping.invalidate_lock ->&folio_wait_table[i] ->&sbi->s_writepages_rwsem ->&n->list_lock ->&journal->j_wait_commit ->&journal->j_wait_done_commit ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&journal->j_wait_transaction_locked ->&fq->mq_flush_lock ->&x->wait#26 ->&base->lock ->(&timer.timer) 
->(console_sem).lock ->console_owner_lock ->console_owner ->&journal->j_barrier ->&sb->s_type->i_mutex_key#8/4 ->&ei->i_prealloc_lock ->&ei->i_es_lock FD: 1 BD: 5 +.+.: &pid->lock FD: 1 BD: 25 +.+.: &new_ns->ns_lock FD: 195 BD: 1 ++++: &type->s_umount_key#32 ->&lru->node[i].lock ->&dentry->d_lock ->&sb->s_type->i_lock_key#22 ->&obj_hash[i].lock ->pool_lock#2 ->&journal->j_state_lock ->&p->alloc_lock ->(work_completion)(&sbi->s_error_work) ->key#3 ->key#4 ->&sbi->s_error_lock ->mmu_notifier_invalidate_range_start ->tk_core.seq.seqcount ->&base->lock ->&fq->mq_flush_lock ->&rq->__lock ->bit_wait_table + i ->ext4_li_mtx ->(console_sem).lock ->mount_lock ->&xa->xa_lock#7 ->&eli->li_list_mtx ->&wb->list_lock ->&sbi->s_writepages_rwsem ->&bdi->wb_waitq ->&s->s_inode_list_lock ->&ei->i_es_lock ->inode_hash_lock ->&fsnotify_mark_srcu FD: 1 BD: 2 +.+.: (work_completion)(&sbi->s_error_work) FD: 1 BD: 117 ....: key#3 FD: 1 BD: 114 ....: key#4 FD: 4 BD: 5 +.+.: &eli->li_list_mtx ->&obj_hash[i].lock ->pool_lock#2 FD: 150 BD: 113 ++++: jbd2_handle ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&ret->b_state_lock ->&journal->j_revoke_lock ->&ei->i_raw_lock ->&journal->j_wait_updates ->&mapping->private_lock ->&meta_group_info[i]->alloc_sem ->tk_core.seq.seqcount ->inode_hash_lock ->batched_entropy_u32.lock ->&ei->i_es_lock ->&sb->s_type->i_lock_key#22 ->&rq->__lock ->&journal->j_state_lock ->bit_wait_table + i ->rcu_node_0 ->&sbi->s_orphan_lock ->&ei->i_data_sem ->&journal->j_list_lock ->&xa->xa_lock#7 ->lock#4 ->lock#5 ->&obj_hash[i].lock ->&base->lock ->&dd->lock ->&rq_wait->wait ->stock_lock ->&n->list_lock ->&cfs_rq->removed.lock ->&____s->seqcount#2 ->key#4 ->&ei->i_prealloc_lock ->&(ei->i_block_reservation_lock) ->&sem->wait_lock ->&p->pi_lock ->&rcu_state.expedited_wq ->&bgl->locks[i].lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&journal->j_wait_reserved ->&folio_wait_table[i] ->&lock->wait_lock ->remove_cache_srcu ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->crngs.lock FD: 72 BD: 118 +.+.: &ret->b_state_lock ->&journal->j_list_lock ->&obj_hash[i].lock ->bit_wait_table + i FD: 71 BD: 121 +.+.: &journal->j_list_lock ->&sb->s_type->i_lock_key#3 ->&wb->list_lock ->key#14 ->&obj_hash[i].lock ->&c->lock ->pool_lock#2 ->quarantine_lock FD: 1 BD: 115 +.+.: &journal->j_revoke_lock FD: 1 BD: 117 +.+.: &ei->i_raw_lock FD: 29 BD: 118 ....: &journal->j_wait_updates ->&p->pi_lock FD: 33 BD: 4508 ..-.: &wb->work_lock ->&obj_hash[i].lock ->&base->lock FD: 53 BD: 114 ++++: &meta_group_info[i]->alloc_sem ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->tk_core.seq.seqcount ->&dd->lock ->&x->wait#26 ->&obj_hash[i].lock ->rcu_node_0 ->&rq->__lock ->&base->lock ->(&timer.timer) ->&fq->mq_flush_lock ->&bgl->locks[i].lock FD: 155 BD: 4 .+.+: sb_internal ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&journal->j_state_lock ->jbd2_handle ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount ->&rq->__lock ->remove_cache_srcu ->rcu_node_0 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&cfs_rq->removed.lock ->quarantine_lock ->&rcu_state.expedited_wq ->&journal->j_wait_transaction_locked FD: 2 BD: 119 ++++: &ei->i_prealloc_lock ->&pa->pa_lock#2 FD: 30 BD: 1 .+.+: file_rwsem ->&ctx->flc_lock ->&rq->__lock FD: 2 BD: 2 +.+.: &ctx->flc_lock ->&fll->lock FD: 1 BD: 3 +.+.: &fll->lock FD: 208 BD: 3 +.+.: &type->i_mutex_dir_key#3/1 ->rename_lock.seqcount ->&dentry->d_lock ->fs_reclaim ->&ei->i_es_lock 
->&ei->i_data_sem ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->pool_lock#2 ->&xa->xa_lock#7 ->lock#4 ->&c->lock ->&mapping->private_lock ->tk_core.seq.seqcount ->&dd->lock ->&obj_hash[i].lock ->bit_wait_table + i ->&rq->__lock ->inode_hash_lock ->&journal->j_state_lock ->&sb->s_type->i_lock_key#22 ->tomoyo_ss ->&s->s_inode_list_lock ->&ei->xattr_sem ->jbd2_handle ->&sb->s_type->i_mutex_key#8 ->&sem->wait_lock ->&xa->xa_lock#3 ->stock_lock ->rcu_node_0 ->&____s->seqcount#2 ->&fsnotify_mark_srcu ->&type->i_mutex_dir_key#3 ->&wb->list_lock ->sb_internal ->&n->list_lock ->remove_cache_srcu ->&rcu_state.expedited_wq ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->quarantine_lock ->&rcu_state.gp_wq ->&cfs_rq->removed.lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&u->bindlock ->&journal->j_wait_transaction_locked FD: 116 BD: 1 +.+.: &type->s_umount_key#33/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&c->lock ->&____s->seqcount ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#25 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_mutex_key#11 ->&dentry->d_lock FD: 41 BD: 3 +.+.: &sb->s_type->i_lock_key#25 ->&dentry->d_lock FD: 91 BD: 2 +.+.: &sb->s_type->i_mutex_key#11 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->&c->lock ->&____s->seqcount ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#25 ->&s->s_inode_list_lock ->tk_core.seq.seqcount FD: 91 BD: 1 +.+.: &type->s_umount_key#34 ->sb_lock ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->&lru->node[i].lock ->&obj_hash[i].lock FD: 43 BD: 1 +.+.: &type->s_umount_key#35 ->sb_lock ->&dentry->d_lock FD: 115 BD: 1 +.+.: &type->s_umount_key#36/1 ->fs_reclaim ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#26 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 41 BD: 4 +.+.: &sb->s_type->i_lock_key#26 ->&dentry->d_lock FD: 1 BD: 1 +.+.: redirect_lock FD: 318 BD: 1 +.+.: &tty->atomic_write_lock ->fs_reclaim ->pool_lock#2 ->&mm->mmap_lock ->&tty->termios_rwsem FD: 32 BD: 7 +.+.: &ldata->output_lock ->&port_lock_key FD: 116 BD: 1 +.+.: &type->s_umount_key#37/1 ->fs_reclaim ->pcpu_alloc_mutex ->shrinker_rwsem ->&c->lock ->&____s->seqcount ->pool_lock#2 ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#27 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&obj_hash[i].lock ->fuse_mutex ->&dentry->d_lock FD: 41 BD: 146 +.+.: &sb->s_type->i_lock_key#27 ->&dentry->d_lock FD: 1 BD: 2 +.+.: fuse_mutex FD: 116 BD: 1 +.+.: &type->s_umount_key#38/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&c->lock ->&____s->seqcount ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#28 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->pstore_sb_lock ->&dentry->d_lock FD: 41 BD: 2 +.+.: &sb->s_type->i_lock_key#28 ->&dentry->d_lock FD: 1 BD: 2 +.+.: pstore_sb_lock FD: 119 BD: 1 +.+.: &type->s_umount_key#39/1 ->fs_reclaim ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#29 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->bpf_preload_lock ->&dentry->d_lock FD: 41 BD: 2 +.+.: &sb->s_type->i_lock_key#29 ->&dentry->d_lock FD: 80 BD: 2 +.+.: bpf_preload_lock ->(kmod_concurrent_max).lock ->fs_reclaim ->pool_lock#2 
->&obj_hash[i].lock ->&x->wait#17 ->&rq->__lock ->running_helpers_waitq.lock FD: 29 BD: 1 ++++: uts_sem ->hostname_poll.wait.lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 105 BD: 3 ++++: &type->i_mutex_dir_key#5 ->fs_reclaim ->&dentry->d_lock ->rename_lock.seqcount ->tomoyo_ss ->&sbinfo->stat_lock ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->batched_entropy_u32.lock ->&c->lock ->&____s->seqcount ->&n->list_lock ->&sem->wait_lock ->&obj_hash[i].lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->remove_cache_srcu ->&rq->__lock ->&cfs_rq->removed.lock ->rcu_node_0 ->&____s->seqcount#2 ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&rcu_state.expedited_wq FD: 121 BD: 2 .+.+: sb_writers#5 ->mount_lock ->&type->i_mutex_dir_key#5 ->&type->i_mutex_dir_key#5/1 ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key ->&wb->list_lock ->&sb->s_type->i_mutex_key#12 ->&sem->wait_lock ->&p->pi_lock ->&rq->__lock ->&s->s_inode_list_lock ->&info->lock ->&sbinfo->stat_lock ->&xa->xa_lock#7 ->&obj_hash[i].lock ->pool_lock#2 ->&fsnotify_mark_srcu ->tomoyo_ss ->&xattrs->lock ->fs_reclaim ->lock#4 ->lock#5 ->&lruvec->lru_lock ->&dentry->d_lock ->rcu_node_0 FD: 105 BD: 4 +.+.: &sb->s_type->i_mutex_key#12 ->&xattrs->lock ->tk_core.seq.seqcount ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&xa->xa_lock#7 ->lock#4 ->&info->lock ->&sb->s_type->i_lock_key ->&wb->list_lock ->key#9 ->&rq->__lock ->&dentry->d_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&sb->s_type->i_mutex_key#12/4 ->tomoyo_ss ->&mapping->i_mmap_rwsem ->lock#5 ->&lruvec->lru_lock ->&obj_hash[i].lock ->rcu_node_0 FD: 114 BD: 3 +.+.: &type->i_mutex_dir_key#5/1 ->rename_lock.seqcount ->fs_reclaim ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&dentry->d_lock ->tomoyo_ss ->&sbinfo->stat_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->batched_entropy_u32.lock ->&obj_hash[i].lock ->&u->bindlock ->&sb->s_type->i_mutex_key#12/4 ->&sb->s_type->i_mutex_key#12 ->&fsnotify_mark_srcu ->&sem->wait_lock ->&rq->__lock ->&n->list_lock ->lock#4 ->lock#5 ->&lruvec->lru_lock ->&info->lock ->&xa->xa_lock#7 ->rcu_node_0 ->remove_cache_srcu ->&____s->seqcount#2 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 2 BD: 5 +.+.: &f->f_lock ->fasync_lock FD: 1 BD: 2 ....: key#5 FD: 1 BD: 2 ....: hostname_poll.wait.lock FD: 205 BD: 1 .+.+: dup_mmap_sem ->&mm->mmap_lock ->&rq->__lock ->pgd_lock ->stock_lock ->&obj_hash[i].lock ->key ->pcpu_lock ->percpu_counters_lock ->&cfs_rq->removed.lock FD: 81 BD: 98 +.+.: &mm->mmap_lock/1 ->&c->lock ->&____s->seqcount ->pool_lock#2 ->fs_reclaim ->&vma->vm_lock->lock ->&mapping->i_mmap_rwsem ->&anon_vma->rwsem ->mmu_notifier_invalidate_range_start ->&mm->page_table_lock ->ptlock_ptr(page) ->ptlock_ptr(page)#2 ->&mm->context.lock ->&obj_hash[i].lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&rq->__lock ->&n->list_lock ->remove_cache_srcu ->&sem->wait_lock ->&p->pi_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->quarantine_lock ->stock_lock ->&____s->seqcount#2 ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&cfs_rq->removed.lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->key#24 ->&meta->lock FD: 28 BD: 99 +.+.: &mm->context.lock ->&rq->__lock FD: 1 BD: 9 .+.+: &xattrs->lock FD: 93 BD: 8 +.+.: &u->bindlock ->&net->unx.table.locks[i] ->&net->unx.table.locks[i]/1 ->&bsd_socket_locks[i] 
->fs_reclaim ->pool_lock#2 ->batched_entropy_u32.lock ->&rq->__lock FD: 41 BD: 12 +.+.: &net->unx.table.locks[i]/1 ->&dentry->d_lock FD: 1 BD: 11 +.+.: &bsd_socket_locks[i] FD: 218 BD: 2 +.+.: &u->iolock ->rlock-AF_UNIX ->&mm->mmap_lock ->&obj_hash[i].lock ->pool_lock#2 ->quarantine_lock ->&rq->__lock ->&meta->lock ->kfence_freelist_lock ->&u->peer_wait ->&u->lock ->fs_reclaim ->&____s->seqcount ->rcu_node_0 ->&dir->lock ->&rcu_state.expedited_wq ->stock_lock ->&base->lock FD: 33 BD: 4172 ..-.: &ei->socket.wq.wait ->&p->pi_lock ->&ep->lock ->&ep->poll_wait/1 FD: 1 BD: 424 ....: &wq#3 FD: 43 BD: 8 +.+.: &u->lock/1 ->&sk->sk_peer_lock ->&dentry->d_lock ->&sk->sk_peer_lock/1 FD: 105 BD: 1 +.+.: &group->mark_mutex ->&fsnotify_mark_srcu ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&c->lock ->lock ->ucounts_lock ->&rq->__lock ->&mark->lock ->&conn->lock ->&sb->s_type->i_lock_key#5 ->&sb->s_type->i_lock_key#22 ->&sb->s_type->i_lock_key ->&____s->seqcount#2 ->remove_cache_srcu ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->&n->list_lock ->&lock->wait_lock FD: 13 BD: 221 +.+.: &group->inotify_data.idr_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock FD: 3 BD: 2 +.+.: &mark->lock ->&fsnotify_mark_srcu ->&conn->lock FD: 1 BD: 7 +.+.: &conn->lock FD: 1 BD: 1 +.+.: &evdev->client_lock FD: 210 BD: 1 +.+.: &evdev->mutex ->&dev->mutex#2 ->&mm->mmap_lock FD: 217 BD: 2 +.+.: sk_lock-AF_NETLINK ->slock-AF_NETLINK ->&mm->mmap_lock ->fs_reclaim ->&c->lock ->&n->list_lock ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->pcpu_alloc_mutex ->&obj_hash[i].lock ->pack_mutex ->batched_entropy_u32.lock ->text_mutex ->&fp->aux->used_maps_mutex ->rcu_node_0 ->&rq->__lock FD: 1 BD: 3 +...: slock-AF_NETLINK FD: 1 BD: 4168 ..-.: rlock-AF_NETLINK FD: 1 BD: 7 ....: &nlk->wait FD: 82 BD: 67 +.+.: (work_completion)(&ht->run_work) ->&ht->mutex ->&rq->__lock FD: 81 BD: 68 +.+.: &ht->mutex ->fs_reclaim ->pool_lock#2 ->batched_entropy_u32.lock ->rhashtable_bucket ->&ht->lock ->&c->lock ->&____s->seqcount ->&rq->__lock ->&obj_hash[i].lock ->&n->list_lock ->remove_cache_srcu ->batched_entropy_u8.lock ->kfence_freelist_lock ->&____s->seqcount#2 ->rcu_node_0 ->quarantine_lock ->&meta->lock FD: 1 BD: 3956 ....: rhashtable_bucket/1 FD: 12 BD: 69 +.+.: &ht->lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 3 +...: clock-AF_NETLINK FD: 1 BD: 7 ....: genl_sk_destructing_waitq.lock FD: 1 BD: 7 ....: wlock-AF_NETLINK FD: 152 BD: 2 +.+.: (work_completion)(&w->w) ->nfc_devlist_mutex ->&obj_hash[i].lock ->pool_lock#2 ->&meta->lock ->kfence_freelist_lock ->&base->lock ->&rq->__lock ->quarantine_lock FD: 1 BD: 4 +.+.: &genl_data->genl_data_mutex FD: 1 BD: 4 +...: &rdev->beacon_registrations_lock FD: 1 BD: 68 +...: &rdev->mgmt_registrations_lock FD: 1 BD: 4 +...: &wdev->pmsr_lock FD: 1 BD: 65 +.+.: reg_indoor_lock FD: 916 BD: 1 .+.+: sb_writers#6 ->mount_lock ->&sb->s_type->i_mutex_key#10 ->&rq->__lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#8 ->&wb->list_lock FD: 2 BD: 9 +.+.: &sk->sk_peer_lock ->&sk->sk_peer_lock/1 FD: 32 BD: 7 ....: &group->notification_waitq ->&p->pi_lock ->&ep->lock FD: 1 BD: 7 +.+.: &group->notification_lock FD: 1 BD: 1 ....: &client->wait FD: 1 BD: 164 ....: key#6 FD: 1 BD: 164 ....: key#7 FD: 1 BD: 164 ....: key#8 FD: 246 BD: 1 +.+.: &pipe->mutex/1 ->&pipe->rd_wait ->&rq->__lock ->&lock->wait_lock ->&pipe->wr_wait ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&mm->mmap_lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->rcu_node_0 
->stock_lock ->sk_lock-AF_NETLINK ->slock-AF_NETLINK ->free_vmap_area_lock ->vmap_area_lock ->init_mm.page_table_lock ->&c->lock ->tk_core.seq.seqcount ->purge_vmap_area_lock ->&sighand->siglock ->&rcu_state.expedited_wq ->&n->list_lock ->nfnl_subsys_ctnetlink ->rlock-AF_NETLINK ->&____s->seqcount#2 ->&u->iolock ->&ei->socket.wq.wait FD: 32 BD: 4 ....: &pipe->rd_wait ->&p->pi_lock ->&ep->lock FD: 1 BD: 125 ....: key#9 FD: 29 BD: 4 ....: &pipe->wr_wait ->&p->pi_lock FD: 47 BD: 1 .+.+: sb_writers#7 ->tk_core.seq.seqcount ->mount_lock ->&rq->__lock ->rcu_node_0 ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->&rcu_state.expedited_wq FD: 44 BD: 5 +.+.: &sb->s_type->i_mutex_key#12/4 ->&dentry->d_lock ->tk_core.seq.seqcount ->rename_lock ->&rq->__lock ->rcu_node_0 ->&cfs_rq->removed.lock FD: 4 BD: 424 +.+.: &dentry->d_lock/2 ->&dentry->d_lock/3 FD: 3 BD: 425 +.+.: &dentry->d_lock/3 ->&____s->seqcount#4 FD: 1 BD: 427 +.+.: &____s->seqcount#4/1 FD: 2 BD: 1 +.+.: sk_lock-AF_UNIX ->slock-AF_UNIX FD: 1 BD: 2 +...: slock-AF_UNIX FD: 1 BD: 1 ....: &rs->lock#2 FD: 55 BD: 3 +.+.: oom_adj_mutex ->&p->alloc_lock ->&rq->__lock ->rcu_node_0 ->oom_adj_mutex.wait_lock FD: 172 BD: 2 +.+.: &ep->mtx ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&f->f_lock ->&ei->socket.wq.wait ->&ep->lock ->&group->notification_waitq ->&group->notification_lock ->&sighand->signalfd_wqh ->&sighand->siglock ->&rq->__lock ->&pipe->rd_wait ->&obj_hash[i].lock ->remove_cache_srcu ->key#11 ->&lock->wait_lock ->rcu_node_0 ->&pipe->wr_wait ->&p->pi_lock ->stock_lock ->wakeup_ida.xa_lock ->&x->wait#9 ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->uevent_sock_mutex ->subsys mutex#15 ->events_lock ->&dentry->d_lock ->&n->list_lock ->&u->lock ->&ws->lock ->&ACCESS_PRIVATE(sdp, lock) ->&cfs_rq->removed.lock ->wakeup_srcu ->&x->wait#3 ->(&ws->timer) ->&base->lock ->deferred_probe_mutex ->device_links_lock ->mmu_notifier_invalidate_range_start ->deleted_ws.lock ->&rcu_state.expedited_wq ->&sem->wait_lock ->uevent_sock_mutex.wait_lock ->&____s->seqcount#2 FD: 173 BD: 1 +.+.: epnested_mutex ->&ep->mtx FD: 31 BD: 4180 ...-: &ep->lock ->&ep->wq ->&ws->lock FD: 32 BD: 123 ....: &sighand->signalfd_wqh ->&p->pi_lock ->&ep->lock FD: 795 BD: 1 .+.+: sb_writers#8 ->mount_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#24 ->&wb->list_lock ->&type->i_mutex_dir_key#4 ->fs_reclaim ->pool_lock#2 ->&mm->mmap_lock ->&of->mutex ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->remove_cache_srcu ->&rq->__lock ->rcu_node_0 ->&root->kernfs_iattr_rwsem ->&dentry->d_lock ->tomoyo_ss ->&sb->s_type->i_mutex_key#13 ->iattr_mutex ->&xattrs->lock ->&____s->seqcount#2 ->&____s->seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&cfs_rq->removed.lock FD: 11 BD: 10 +.+.: swap_lock ->&p->lock#2 FD: 91 BD: 1 .+.+: kn->active ->fs_reclaim ->pool_lock#2 ->&kernfs_locks->open_file_mutex[count] ->&k->list_lock ->uevent_sock_mutex ->&obj_hash[i].lock ->&c->lock ->remove_cache_srcu ->quarantine_lock ->&n->list_lock ->&rq->__lock FD: 77 BD: 60 +.+.: &kernfs_locks->open_file_mutex[count] ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock ->&c->lock ->&____s->seqcount ->&n->list_lock ->&rq->__lock ->rcu_node_0 ->remove_cache_srcu ->&rcu_state.expedited_wq ->&____s->seqcount#2 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&cfs_rq->removed.lock FD: 79 BD: 2 +.+.: &sb->s_type->i_mutex_key#13 ->&rq->__lock ->tk_core.seq.seqcount ->&root->kernfs_iattr_rwsem ->&sem->wait_lock 
->&p->pi_lock FD: 787 BD: 6 +.+.: &of->mutex ->&rq->__lock ->cgroup_mutex FD: 29 BD: 4181 ..-.: &ep->wq ->&p->pi_lock FD: 90 BD: 1 .+.+: kn->active#2 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->pool_lock#2 ->uevent_sock_mutex ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount ->&n->list_lock ->quarantine_lock ->&rq->__lock ->remove_cache_srcu FD: 1 BD: 4481 ....: &sem->wait_lock FD: 35 BD: 2 +.+.: (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) ->krc.lock ->&obj_hash[i].lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 90 BD: 1 .+.+: kn->active#3 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&____s->seqcount ->pool_lock#2 ->uevent_sock_mutex ->&obj_hash[i].lock ->&rq->__lock ->quarantine_lock ->&n->list_lock ->remove_cache_srcu FD: 84 BD: 1 .+.+: kn->active#4 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->param_lock ->pool_lock#2 ->&on->poll ->&c->lock ->&rq->__lock ->&n->list_lock ->&____s->seqcount FD: 77 BD: 223 +.+.: iattr_mutex ->fs_reclaim ->&____s->seqcount ->&rq->__lock ->pool_lock#2 ->&c->lock ->tk_core.seq.seqcount ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 66 +.+.: disk_events_mutex FD: 29 BD: 117 ..-.: &x->wait#26 ->&p->pi_lock FD: 137 BD: 1 .+.+: kn->active#5 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&____s->seqcount ->pool_lock#2 ->uevent_sock_mutex ->&obj_hash[i].lock ->&rq->__lock ->&n->list_lock ->quarantine_lock ->remove_cache_srcu ->&device->physical_node_lock ->udc_lock ->fw_lock ->rcu_node_0 ->&rfkill->lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&cfs_rq->removed.lock ->&base->lock ->&____s->seqcount#2 ->uevent_sock_mutex.wait_lock ->&p->pi_lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&meta->lock FD: 80 BD: 1 .+.+: kn->active#6 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock FD: 80 BD: 1 .+.+: kn->active#7 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] ->&____s->seqcount FD: 80 BD: 1 .+.+: kn->active#8 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 80 BD: 1 .+.+: kn->active#9 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&n->list_lock ->&rq->__lock FD: 80 BD: 1 .+.+: kn->active#10 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock FD: 80 BD: 1 .+.+: kn->active#11 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&____s->seqcount FD: 80 BD: 1 .+.+: kn->active#12 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock FD: 80 BD: 1 .+.+: kn->active#13 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 80 BD: 1 .+.+: kn->active#14 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&n->list_lock FD: 1 BD: 134 +.+.: rcu_state.exp_mutex.wait_lock FD: 78 BD: 1 .+.+: kn->active#15 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->dev_base_lock ->&c->lock ->&n->list_lock ->remove_cache_srcu FD: 81 BD: 1 .+.+: kn->active#16 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->dev_base_lock ->&c->lock ->&n->list_lock FD: 80 BD: 1 .+.+: kn->active#17 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&n->list_lock FD: 81 BD: 1 .+.+: kn->active#18 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->dev_base_lock ->&c->lock FD: 81 BD: 1 .+.+: kn->active#19 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->dev_base_lock ->&c->lock FD: 81 BD: 1 .+.+: kn->active#20 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] ->dev_base_lock FD: 85 BD: 1 .+.+: kn->active#21 ->fs_reclaim 
->pool_lock#2 ->&kernfs_locks->open_file_mutex[count] ->&dev->power.lock ->pci_lock FD: 2 BD: 8 ....: pci_lock ->pci_config_lock FD: 80 BD: 1 .+.+: kn->active#22 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 80 BD: 1 .+.+: kn->active#23 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 80 BD: 1 .+.+: kn->active#24 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&n->list_lock FD: 80 BD: 1 .+.+: kn->active#25 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&____s->seqcount ->&n->list_lock FD: 80 BD: 1 .+.+: kn->active#26 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&n->list_lock ->&____s->seqcount FD: 80 BD: 1 .+.+: kn->active#27 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&n->list_lock FD: 77 BD: 1 .+.+: kn->active#28 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->remove_cache_srcu ->&____s->seqcount FD: 77 BD: 1 .+.+: kn->active#29 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&n->list_lock ->remove_cache_srcu FD: 77 BD: 1 .+.+: kn->active#30 ->fs_reclaim ->&c->lock ->&n->list_lock ->&kernfs_locks->open_file_mutex[count] ->&____s->seqcount ->remove_cache_srcu FD: 80 BD: 1 .+.+: kn->active#31 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 80 BD: 1 .+.+: kn->active#32 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 80 BD: 1 .+.+: kn->active#33 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 1 BD: 1 +.+.: &mousedev->client_lock FD: 37 BD: 8 +.+.: &mousedev->mutex#2 ->&dev->mutex#2 FD: 1 BD: 1 +.+.: &sb->s_type->i_mutex_key#14 FD: 56 BD: 1 .+.+: mapping.invalidate_lock#2 ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->pool_lock#2 ->&xa->xa_lock#7 ->lock#4 ->tk_core.seq.seqcount ->&dd->lock ->&c->lock ->&n->list_lock ->&rq->__lock FD: 80 BD: 1 .+.+: kn->active#34 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] FD: 80 BD: 1 .+.+: kn->active#35 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 80 BD: 1 .+.+: kn->active#36 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] FD: 77 BD: 1 .+.+: kn->active#37 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&n->list_lock ->&____s->seqcount ->remove_cache_srcu FD: 81 BD: 1 .+.+: kn->active#38 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->i2c_dev_list_lock FD: 1 BD: 3 +.+.: tomoyo_policy_lock.wait_lock FD: 82 BD: 2 +.+.: &dev_instance->mutex ->fs_reclaim ->pool_lock#2 ->vicodec_core:1844:(hdl)->_lock ->&c->lock ->&vdev->fh_lock ->&m2m_dev->job_spinlock ->&q->done_wq ->&q->mmap_lock ->&obj_hash[i].lock FD: 4 BD: 3 +.+.: vicodec_core:1844:(hdl)->_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 5 ....: &vdev->fh_lock FD: 87 BD: 1 +.+.: &mdev->req_queue_mutex ->&dev_instance->mutex ->&vdev->fh_lock ->&mdev->graph_mutex ->vicodec_core:1844:(hdl)->_lock ->&obj_hash[i].lock ->pool_lock#2 ->vim2m:1183:(hdl)->_lock ->&dev->dev_mutex ->&dev->mutex#3 FD: 1 BD: 4 ....: &m2m_dev->job_spinlock FD: 1 BD: 4 ....: &q->done_wq FD: 1 BD: 4 +.+.: &q->mmap_lock FD: 1 BD: 1 +.+.: fh->state->lock FD: 82 BD: 2 +.+.: &dev->dev_mutex ->fs_reclaim ->pool_lock#2 ->vim2m:1183:(hdl)->_lock ->&obj_hash[i].lock ->&vdev->fh_lock ->&m2m_dev->job_spinlock ->&q->done_wq ->&q->mmap_lock FD: 4 BD: 3 +.+.: vim2m:1183:(hdl)->_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 77 BD: 1 .+.+: kn->active#39 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&____s->seqcount ->remove_cache_srcu ->&n->list_lock FD: 33 BD: 1 ..-.: &(&wb->dwork)->timer FD: 
164 BD: 1 +.+.: (wq_completion)writeback ->(work_completion)(&(&wb->dwork)->work) ->(work_completion)(&(&wb->bw_dwork)->work) FD: 162 BD: 2 +.+.: (work_completion)(&(&wb->dwork)->work) ->&wb->work_lock ->&wb->list_lock ->&p->sequence ->key#10 ->&sb->s_type->i_lock_key#22 ->&sbi->s_writepages_rwsem ->pool_lock#2 ->&dd->lock ->&obj_hash[i].lock ->&pl->lock ->&rq->__lock ->&bdi->wb_waitq FD: 2 BD: 146 +.-.: &p->sequence ->key#13 FD: 1 BD: 4509 ..-.: key#10 FD: 1 BD: 1 +.+.: &vcapture->lock FD: 2 BD: 2 +.+.: &dev->mutex#3 ->&vdev->fh_lock FD: 1 BD: 10 +.+.: &sk->sk_peer_lock/1 FD: 29 BD: 1 +.-.: (&journal->j_commit_timer) ->&p->pi_lock FD: 109 BD: 4 +.+.: &journal->j_checkpoint_mutex ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->tk_core.seq.seqcount ->&dd->lock ->&obj_hash[i].lock ->&base->lock ->bit_wait_table + i ->&rq->__lock ->&journal->j_state_lock ->&fq->mq_flush_lock ->&x->wait#26 ->&journal->j_list_lock ->&c->lock ->rcu_node_0 ->kfence_freelist_lock ->(&timer.timer) ->&ei->i_es_lock ->&mapping->private_lock ->&meta->lock ->&sb->s_type->i_lock_key#3 ->lock#4 ->lock#5 ->&lruvec->lru_lock ->&____s->seqcount ->&____s->seqcount#2 ->&n->list_lock ->&pool->lock#4 ->&rq_wait->wait ->&cfs_rq->removed.lock FD: 29 BD: 119 ....: &journal->j_wait_transaction_locked ->&p->pi_lock FD: 1 BD: 4481 ..-.: &memcg->move_lock FD: 1 BD: 119 +.+.: &sbi->s_md_lock FD: 1 BD: 1 ....: &journal->j_fc_wait FD: 1 BD: 1 +.+.: &journal->j_history_lock FD: 43 BD: 4 +.+.: &sb->s_type->i_mutex_key#4/4 ->&dentry->d_lock ->tk_core.seq.seqcount ->rename_lock FD: 1 BD: 3 ....: key#11 FD: 31 BD: 1 ..-.: drivers/base/dd.c:321 FD: 39 BD: 2 +.+.: (deferred_probe_timeout_work).work ->device_links_lock ->deferred_probe_mutex ->deferred_probe_work FD: 160 BD: 11 ++++: &sbi->s_writepages_rwsem ->&xa->xa_lock#7 ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->pool_lock#2 ->&c->lock ->lock#4 ->lock#5 ->&obj_hash[i].lock ->&journal->j_state_lock ->jbd2_handle ->tk_core.seq.seqcount ->&dd->lock ->&base->lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&rq_wait->wait ->rcu_node_0 ->&rq->__lock ->&____s->seqcount#2 ->&mapping->private_lock ->&folio_wait_table[i] ->&rcu_state.expedited_wq ->&n->list_lock ->remove_cache_srcu ->&cfs_rq->removed.lock ->&rsp->gp_wait ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->&ei->i_data_sem ->&rnp->exp_lock ->rcu_state.exp_mutex FD: 83 BD: 1 .+.+: &type->s_umount_key#40 ->&sb->s_type->i_lock_key#3 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->tk_core.seq.seqcount ->&dd->lock ->&obj_hash[i].lock ->&base->lock ->&c->lock ->lock#4 ->lock#5 ->&wb->list_lock ->&rq->__lock ->rcu_node_0 ->&____s->seqcount ->&rq_wait->wait ->lock#10 ->&____s->seqcount#2 FD: 1 BD: 4508 ..-.: &s->s_inode_wblist_lock FD: 1 BD: 4509 ..-.: key#12 FD: 33 BD: 1 ..-.: &(&wb->bw_dwork)->timer FD: 68 BD: 2 +.+.: (work_completion)(&(&wb->bw_dwork)->work) ->&wb->list_lock ->&rq->__lock FD: 80 BD: 1 .+.+: kn->active#40 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 80 BD: 1 .+.+: kn->active#41 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 80 BD: 1 .+.+: kn->active#42 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 80 BD: 1 .+.+: kn->active#43 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 28 BD: 5 +.+.: &lo->lo_mutex ->&rq->__lock FD: 49 BD: 5 +.+.: &nbd->config_lock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&bdev->bd_size_lock ->&q->queue_lock ->&ACCESS_PRIVATE(sdp, lock) ->set->srcu ->&obj_hash[i].lock ->&rq->__lock ->&x->wait#3 ->&c->lock 
FD: 32 BD: 8 ....: &ACCESS_PRIVATE(ssp->srcu_sup, lock) ->&ACCESS_PRIVATE(sdp, lock) FD: 80 BD: 1 .+.+: kn->active#44 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 2 BD: 4 +.+.: &new->lock ->&mtdblk->cache_mutex FD: 1 BD: 5 +.+.: &mtdblk->cache_mutex FD: 1 BD: 6 ....: &tags->lock FD: 80 BD: 1 .+.+: kn->active#45 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 205 BD: 1 +.+.: &mtd->master.chrdev_lock ->&mm->mmap_lock FD: 16 BD: 1 +.-.: (&dom->period_timer) ->key#13 ->&p->sequence ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 4510 ..-.: key#13 FD: 1 BD: 4 +.+.: destroy_lock FD: 33 BD: 1 ..-.: fs/notify/mark.c:89 FD: 89 BD: 2 +.+.: (reaper_work).work ->destroy_lock ->&ACCESS_PRIVATE(sdp, lock) ->&fsnotify_mark_srcu ->&obj_hash[i].lock ->&x->wait#3 ->&rq->__lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq ->pool_lock ->&ACCESS_PRIVATE(ssp->srcu_sup, lock) ->quarantine_lock ->&cfs_rq->removed.lock FD: 89 BD: 2 +.+.: connector_reaper_work ->destroy_lock ->&ACCESS_PRIVATE(sdp, lock) ->&fsnotify_mark_srcu ->&obj_hash[i].lock ->&x->wait#3 ->&rq->__lock ->&____s->seqcount ->pool_lock#2 ->pool_lock ->rcu_node_0 ->&cfs_rq->removed.lock ->quarantine_lock ->&ACCESS_PRIVATE(ssp->srcu_sup, lock) ->&rcu_state.expedited_wq FD: 1 BD: 1 +.+.: userns_state_mutex FD: 4 BD: 65 +...: fib_info_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 88 BD: 65 +...: &net->sctp.local_addr_lock ->&net->sctp.addr_wq_lock FD: 87 BD: 67 +.-.: &net->sctp.addr_wq_lock ->pool_lock#2 ->&obj_hash[i].lock ->&base->lock ->&c->lock ->&n->list_lock ->&____s->seqcount ->&____s->seqcount#2 ->k-slock-AF_INET6/1 ->slock-AF_INET/1 ->slock-AF_INET6/1 ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 1 BD: 64 +...: _xmit_LOOPBACK FD: 28 BD: 72 .+.+: netpoll_srcu ->&rq->__lock FD: 1 BD: 75 +.-.: &in_dev->mc_tomb_lock FD: 18 BD: 75 +.-.: &im->lock ->pool_lock#2 ->&c->lock ->&zone->lock ->&____s->seqcount ->&n->list_lock ->&obj_hash[i].lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&____s->seqcount#2 ->init_task.mems_allowed_seq.seqcount FD: 1 BD: 71 +.+.: cbs_list_lock FD: 31 BD: 65 +...: &net->ipv6.addrconf_hash_lock ->&obj_hash[i].lock ->&base->lock FD: 32 BD: 3987 +...: &ifa->lock ->batched_entropy_u32.lock ->crngs.lock ->&obj_hash[i].lock ->&base->lock FD: 45 BD: 3990 +...: &tb->tb6_lock ->&net->ipv6.fib6_walker_lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->nl_table_lock ->&obj_hash[i].lock ->nl_table_wait.lock ->rlock-AF_NETLINK ->rt6_exception_lock ->quarantine_lock ->&data->fib_event_queue_lock ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&____s->seqcount#2 ->stock_lock FD: 1 BD: 3991 ++..: &net->ipv6.fib6_walker_lock FD: 295 BD: 65 +.+.: sk_lock-AF_INET ->slock-AF_INET ->&table->hash[i].lock ->&tcp_hashinfo.bhash[i].lock ->&h->lhash2[i].lock ->&queue->rskq_lock ->clock-AF_INET ->&obj_hash[i].lock ->&base->lock ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&mm->mmap_lock ->tk_core.seq.seqcount ->&sd->defer_lock ->mmu_notifier_invalidate_range_start ->&hashinfo->ehash_locks[i] ->elock-AF_INET ->&meta->lock ->kfence_freelist_lock ->&n->list_lock ->rcu_node_0 ->&rq->__lock ->batched_entropy_u8.lock ->remove_cache_srcu ->&____s->seqcount#8 ->once_mutex ->&pool->lock ->batched_entropy_u32.lock ->batched_entropy_u16.lock ->&ei->socket.wq.wait ->quarantine_lock ->&cfs_rq->removed.lock ->&____s->seqcount#2 ->&rcu_state.expedited_wq ->stock_lock ->&sctp_port_hashtable[i].lock ->crngs.lock ->&asoc->wait ->krc.lock ->sctp_assocs_id_lock ->&list->lock#25 
->(&tw->tw_timer) ->&in_dev->mc_tomb_lock ->&im->lock ->free_vmap_area_lock ->vmap_area_lock ->pcpu_alloc_mutex ->pack_mutex ->text_mutex ->&fp->aux->used_maps_mutex ->&sb->s_type->i_lock_key#8 ->&dir->lock ->k-sk_lock-AF_INET/1 ->k-slock-AF_INET ->k-clock-AF_INET ->k-sk_lock-AF_INET ->&xa->xa_lock#7 ->&fsnotify_mark_srcu ->&msk->pm.lock ->&sighand->siglock ->&f->f_owner.lock ->&token_hash[i].lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->key#29 ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 81 BD: 80 +.-.: slock-AF_INET ->&obj_hash[i].lock ->batched_entropy_u16.lock ->&tcp_hashinfo.bhash[i].lock ->&hashinfo->ehash_locks[i] ->tk_core.seq.seqcount ->(&req->rsk_timer) ->&base->lock ->&queue->rskq_lock ->pool_lock#2 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&c->lock ->&____s->seqcount ->elock-AF_INET ->batched_entropy_u32.lock ->&sk->sk_lock.wq ->key#25 ->&n->list_lock FD: 1 BD: 107 ++..: clock-AF_INET FD: 344 BD: 68 +.+.: sk_lock-AF_INET6 ->slock-AF_INET6 ->&table->hash[i].lock ->&____s->seqcount#8 ->batched_entropy_u32.lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock ->batched_entropy_u16.lock ->&tcp_hashinfo.bhash[i].lock ->&h->lhash2[i].lock ->fs_reclaim ->&mm->mmap_lock ->once_lock ->rcu_node_0 ->&rq->__lock ->tk_core.seq.seqcount ->clock-AF_INET6 ->&dccp_hashinfo.bhash[i].lock ->stock_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#8 ->&dir->lock ->k-sk_lock-AF_INET6/1 ->k-slock-AF_INET6 ->k-clock-AF_INET6 ->crngs.lock ->&token_hash[i].lock ->k-sk_lock-AF_INET6 ->&ei->socket.wq.wait ->&xa->xa_lock#7 ->&fsnotify_mark_srcu ->&msk->pm.lock ->elock-AF_INET6 ->&idev->mc_lock ->krc.lock ->&n->list_lock ->&hashinfo->ehash_locks[i] ->&sctp_port_hashtable[i].lock ->&rcu_state.expedited_wq ->&____s->seqcount#2 ->&base->lock ->&list->lock#5 ->&asoc->wait ->sctp_assocs_id_lock ->&list->lock#25 ->remove_cache_srcu ->batched_entropy_u8.lock ->kfence_freelist_lock ->tcpv6_prot_mutex ->device_spinlock ->crypto_alg_sem ->(kmod_concurrent_max).lock ->&x->wait#17 ->running_helpers_waitq.lock ->(crypto_chain).rwsem ->&x->wait#21 ->(&timer.timer) ->&sw_ctx_tx->encrypt_compl_lock ->&sem->wait_lock ->&p->pi_lock ->lock ->l2tp_ip6_lock ->&ping_table.lock ->&cfs_rq->removed.lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&meta->lock ->&queue->rskq_lock FD: 72 BD: 72 +.-.: slock-AF_INET6 ->&obj_hash[i].lock ->elock-AF_INET6 ->&dccp_hashinfo.bhash[i].lock ->pool_lock#2 ->&sk->sk_lock.wq ->tk_core.seq.seqcount ->&c->lock ->&base->lock ->&tcp_hashinfo.bhash[i].lock ->&n->list_lock ->key#25 ->&list->lock#25 ->krc.lock FD: 42 BD: 111 ++.-: clock-AF_INET6 ->pool_lock#2 ->&c->lock ->rds_tcp_tc_list_lock ->&cp->cp_lock ->&rm->m_rs_lock ->&obj_hash[i].lock ->&____s->seqcount#2 ->&____s->seqcount ->&list->lock#27 ->tk_core.seq.seqcount ->&n->list_lock ->&sd->defer_lock FD: 839 BD: 1 +.+.: &f->f_pos_lock ->sb_writers#5 ->&p->lock ->&type->i_mutex_dir_key#4 ->&mm->mmap_lock ->&rq->__lock ->sb_writers#4 ->&lock->wait_lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->sb_writers#3 ->(console_sem).lock ->tk_core.seq.seqcount FD: 80 BD: 1 .+.+: kn->active#46 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 188 BD: 64 ++++: dev_addr_sem ->net_rwsem ->&tn->lock ->&sdata->sec_mtx ->fs_reclaim ->pool_lock#2 ->nl_table_lock ->rlock-AF_NETLINK ->nl_table_wait.lock ->&tbl->lock ->&pn->hash_lock ->&obj_hash[i].lock ->input_pool.lock ->rcu_node_0 ->&rq->__lock ->&c->lock ->&____s->seqcount ->&br->lock ->remove_cache_srcu ->team->team_lock_key 
->&n->list_lock ->team->team_lock_key#2 ->team->team_lock_key#3 ->team->team_lock_key#4 ->team->team_lock_key#5 ->quarantine_lock ->_xmit_ETHER ->team->team_lock_key#6 ->&hard_iface->bat_iv.ogm_buff_mutex ->&____s->seqcount#2 FD: 781 BD: 2 +.+.: nlk_cb_mutex-GENERIC ->fs_reclaim ->pool_lock#2 ->&____s->seqcount ->rtnl_mutex ->&rdev->wiphy.mtx ->rlock-AF_NETLINK ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->&devlink->lock_key ->&devlink->lock_key#2 ->&devlink->lock_key#3 ->&devlink->lock_key#4 ->&devlink->lock_key#5 ->&devlink->lock_key#6 ->genl_mutex ->&rq->__lock ->&____s->seqcount#2 FD: 21 BD: 68 +...: &rdev->bss_lock ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock ->quarantine_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&base->lock FD: 88 BD: 1 +.-.: (&net->sctp.addr_wq_timer) ->&net->sctp.addr_wq_lock FD: 14 BD: 64 ++..: lapb_list_lock ->pool_lock#2 ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 64 ++.-: x25_neigh_list_lock FD: 1 BD: 64 +...: _xmit_SLIP FD: 15 BD: 1 +.-.: (&eql->timer) ->&eql->queue.lock ->&obj_hash[i].lock ->&base->lock FD: 4 BD: 68 +.-.: &eql->queue.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 64 +...: &vi->refill_lock FD: 60 BD: 3916 +.-.: _xmit_ETHER#2 ->&obj_hash[i].lock ->pool_lock#2 ->quarantine_lock ->&meta->lock ->kfence_freelist_lock ->&____s->seqcount FD: 81 BD: 74 +.+.: &local->chanctx_mtx ->fs_reclaim ->pool_lock#2 ->&data->mutex ->&c->lock ->&____s->seqcount#2 ->&n->list_lock ->&rq->__lock ->&local->queue_stop_reason_lock ->&obj_hash[i].lock ->krc.lock FD: 28 BD: 75 +.+.: &data->mutex ->&rq->__lock FD: 19 BD: 3918 +...: &local->filter_lock ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount FD: 21 BD: 1 +.+.: (wq_completion)phy0 ->(work_completion)(&local->reconfig_filter) FD: 20 BD: 15 +.+.: (work_completion)(&local->reconfig_filter) ->&local->filter_lock FD: 75 BD: 66 +.-.: &dev->tx_global_lock ->_xmit_ETHER#2 ->&obj_hash[i].lock ->&base->lock ->&nr_netdev_xmit_lock_key ->_xmit_LOOPBACK#2 ->&qdisc_xmit_lock_key ->_xmit_TUNNEL#2 ->_xmit_NETROM ->&qdisc_xmit_lock_key#2 ->&vlan_netdev_xmit_lock_key FD: 1 BD: 69 +.-.: &sch->q.lock FD: 1 BD: 66 ....: class FD: 1 BD: 66 ....: (&tbl->proxy_timer) FD: 21 BD: 1 +.+.: (wq_completion)phy1 ->(work_completion)(&local->reconfig_filter) FD: 1 BD: 64 +...: _xmit_VOID FD: 1 BD: 64 +...: _xmit_X25 FD: 4 BD: 65 +...: &lapbeth->up_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 64 BD: 65 +.-.: &lapb->lock ->pool_lock#2 ->&obj_hash[i].lock ->&base->lock ->&c->lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&n->list_lock ->&____s->seqcount ->&list->lock#6 ->&list->lock#7 FD: 2 BD: 115 +.+.: &(ei->i_block_reservation_lock) ->key#15 FD: 770 BD: 2 +.+.: (work_completion)(&work->work) ->devices_rwsem ->&obj_hash[i].lock ->rcu_node_0 ->&rq->__lock ->quarantine_lock ->&meta->lock ->kfence_freelist_lock FD: 745 BD: 2 +.+.: (work_completion)(&(&ifa->dad_work)->work) ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 84 ....: &____s->seqcount#8 FD: 12 BD: 3964 +.-.: &ul->lock ->pool_lock#2 ->&dir->lock#2 ->&c->lock FD: 1 BD: 3991 +.-.: rt6_exception_lock FD: 1 BD: 124 ....: &tty->ctrl.lock FD: 1 BD: 6 +.+.: fasync_lock FD: 1 BD: 1 +.+.: &buf->lock FD: 1 BD: 7 ....: &tty->flow.lock FD: 72 BD: 65 +...: dev->qdisc_tx_busylock ?: &qdisc_tx_busylock ->_xmit_ETHER#2 ->_xmit_SLIP#2 
->_xmit_NETROM ->&obj_hash[i].lock ->pool_lock#2 ->&____s->seqcount ->&sch->q.lock FD: 237 BD: 4 +.+.: &ldata->atomic_read_lock ->&tty->termios_rwsem ->(work_completion)(&buf->work) ->&rq->__lock FD: 1 BD: 5 +.+.: (work_completion)(&buf->work) FD: 31 BD: 1 ..-.: &(&idev->mc_dad_work)->timer FD: 161 BD: 1 +.+.: (wq_completion)mld ->(work_completion)(&(&idev->mc_dad_work)->work) ->(work_completion)(&(&idev->mc_ifc_work)->work) ->&rq->__lock FD: 159 BD: 2 +.+.: (work_completion)(&(&idev->mc_dad_work)->work) ->&idev->mc_lock FD: 31 BD: 3 +.+.: &net->packet.sklist_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&rq->__lock FD: 227 BD: 3 +.+.: sk_lock-AF_PACKET ->slock-AF_PACKET ->&po->bind_lock ->&obj_hash[i].lock ->&rq->__lock ->&mm->mmap_lock ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->pcpu_alloc_mutex ->pack_mutex ->batched_entropy_u32.lock ->text_mutex ->&fp->aux->used_maps_mutex ->&rnp->exp_wq[2] ->&c->lock ->&rnp->exp_lock ->rcu_state.exp_mutex ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock FD: 1 BD: 4 +...: slock-AF_PACKET FD: 16 BD: 4 +.+.: &po->bind_lock ->ptype_lock ->pool_lock#2 ->&dir->lock#2 FD: 1 BD: 3962 +.-.: rlock-AF_PACKET FD: 1 BD: 1 +...: wlock-AF_PACKET FD: 2 BD: 4508 ..-.: &pl->lock ->key#12 FD: 1 BD: 122 ....: key#14 FD: 31 BD: 1 ..-.: &(&idev->mc_ifc_work)->timer FD: 159 BD: 2 +.+.: (work_completion)(&(&idev->mc_ifc_work)->work) ->&idev->mc_lock ->&lock->wait_lock ->&p->pi_lock ->&rq->__lock FD: 14 BD: 3890 +.-.: &ul->lock#2 ->pool_lock#2 ->&dir->lock#2 ->&c->lock ->&n->list_lock ->&____s->seqcount#2 ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 16 BD: 3952 ++--: &n->lock ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->&(&n->ha_lock)->lock ->&____s->seqcount#9 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount FD: 1 BD: 3960 +.--: &____s->seqcount#9 FD: 38 BD: 2 +.+.: (work_completion)(&w->work)#2 ->pool_lock#2 ->&dir->lock ->&obj_hash[i].lock ->nf_conntrack_mutex ->nf_conntrack_mutex.wait_lock ->&rq->__lock ->&p->pi_lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 31 BD: 1 ..-.: &(&ifa->dad_work)->timer FD: 1 BD: 3965 ...-: &____s->seqcount#10 FD: 1 BD: 3 +.+.: fanout_mutex FD: 1 BD: 3 +...: clock-AF_PACKET FD: 1 BD: 3 ....: elock-AF_PACKET FD: 1 BD: 105 +.-.: &ct->lock FD: 76 BD: 1 +.-.: (&dev->watchdog_timer) ->&dev->tx_global_lock FD: 65 BD: 1 +.-.: (&lapb->t1timer) ->&lapb->lock FD: 31 BD: 1 ..-.: &(&dm_bufio_cleanup_old_work)->timer FD: 16 BD: 1 +.+.: (wq_completion)dm_bufio_cache ->(work_completion)(&(&dm_bufio_cleanup_old_work)->work) FD: 15 BD: 2 +.+.: (work_completion)(&(&dm_bufio_cleanup_old_work)->work) ->dm_bufio_clients_lock ->&obj_hash[i].lock ->&base->lock FD: 31 BD: 1 ..-.: &(&tbl->gc_work)->timer FD: 50 BD: 2 +.+.: (work_completion)(&(&tbl->gc_work)->work) ->&tbl->lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 5 BD: 3948 +.-.: &nf_conntrack_locks[i] ->&nf_conntrack_locks[i]/1 ->batched_entropy_u8.lock FD: 4 BD: 3949 +.-.: &nf_conntrack_locks[i]/1 ->batched_entropy_u8.lock FD: 1 BD: 106 +.-.: &hashinfo->ehash_locks[i] FD: 2 BD: 3953 +.-.: &(&n->ha_lock)->lock ->&____s->seqcount#9 FD: 1 BD: 3952 +.-.: lock#8 FD: 1 BD: 3953 ..-.: id_table_lock FD: 1 BD: 94 ..-.: (&req->rsk_timer) FD: 1 BD: 94 +.-.: &queue->rskq_lock FD: 8 BD: 94 +.-.: tcp_metrics_lock ->&c->lock ->&n->list_lock ->pool_lock#2 FD: 81 BD: 68 +.-.: slock-AF_INET/1 ->tk_core.seq.seqcount ->&c->lock ->pool_lock#2 ->&obj_hash[i].lock ->&base->lock ->&n->list_lock 
->batched_entropy_u8.lock ->kfence_freelist_lock ->&____s->seqcount ->quarantine_lock ->&meta->lock ->&hashinfo->ehash_locks[i] ->&tcp_hashinfo.bhash[i].lock ->elock-AF_INET ->&zone->lock ->&sctp_ep_hashtable[i].lock ->clock-AF_INET ->krc.lock ->&sctp_port_hashtable[i].lock ->key#22 ->&____s->seqcount#2 FD: 97 BD: 1 +.-.: (&icsk->icsk_delack_timer) ->slock-AF_INET ->slock-AF_INET6 ->k-slock-AF_INET FD: 1 BD: 84 +.-.: &sd->defer_lock FD: 1 BD: 87 ..-.: elock-AF_INET FD: 98 BD: 1 +.-.: (&icsk->icsk_retransmit_timer) ->slock-AF_INET ->slock-AF_INET6 ->k-slock-AF_INET6 ->k-slock-AF_INET FD: 1 BD: 116 ....: key#15 FD: 85 BD: 114 +.+.: &sbi->s_orphan_lock ->&ei->i_raw_lock ->&ret->b_state_lock ->&rq->__lock ->&lock->wait_lock ->rcu_node_0 ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->&mapping->private_lock ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->bit_wait_table + i ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock FD: 31 BD: 1 ..-.: drivers/regulator/core.c:6266 FD: 4 BD: 2 +.+.: (regulator_init_complete_work).work ->&k->list_lock ->&k->k_lock FD: 80 BD: 1 .+.+: kn->active#47 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] FD: 32 BD: 3 +.+.: (work_completion)(work) ->lock#4 ->lock#5 ->&rq->__lock FD: 1 BD: 1 +.+.: &futex_queues[i].lock FD: 1 BD: 4 ....: &on->poll FD: 1 BD: 3 +.+.: module_mutex FD: 3 BD: 66 +.+.: once_mutex ->crngs.lock FD: 206 BD: 1 .+.+: sb_writers#9 ->&attr->mutex ->&mm->mmap_lock FD: 205 BD: 2 +.+.: &attr->mutex ->&mm->mmap_lock FD: 120 BD: 1 +.+.: &type->s_umount_key#41/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&root->kernfs_rwsem ->&sb->s_type->i_lock_key#30 ->&root->kernfs_supers_rwsem ->&dentry->d_lock FD: 41 BD: 310 +.+.: &sb->s_type->i_lock_key#30 ->&dentry->d_lock FD: 789 BD: 1 .+.+: sb_writers#10 ->mount_lock ->&type->i_mutex_dir_key#6 ->fs_reclaim ->&mm->mmap_lock ->&of->mutex ->&obj_hash[i].lock ->&type->i_mutex_dir_key#6/1 FD: 83 BD: 2 ++++: &type->i_mutex_dir_key#6 ->tomoyo_ss ->tk_core.seq.seqcount ->&root->kernfs_iattr_rwsem ->rename_lock.seqcount ->fs_reclaim ->&dentry->d_lock ->&root->kernfs_rwsem ->&sb->s_type->i_lock_key#30 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->pool_lock#2 ->&xa->xa_lock#3 ->&rq->__lock ->&obj_hash[i].lock ->stock_lock FD: 80 BD: 1 ++++: kn->active#48 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->pool_lock#2 FD: 1 BD: 1 +.+.: &sb->s_type->i_mutex_key#15 FD: 120 BD: 1 +.+.: &type->s_umount_key#42/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->&c->lock ->list_lrus_mutex ->sb_lock ->&root->kernfs_rwsem ->&sb->s_type->i_lock_key#31 ->&root->kernfs_supers_rwsem ->&dentry->d_lock ->&n->list_lock FD: 41 BD: 310 +.+.: &sb->s_type->i_lock_key#31 ->&dentry->d_lock FD: 105 BD: 1 ++++: &type->s_umount_key#43 ->shrinker_rwsem ->percpu_ref_switch_lock ->&root->kernfs_supers_rwsem ->rename_lock.seqcount ->&dentry->d_lock ->&sb->s_type->i_lock_key#31 ->&s->s_inode_list_lock ->&xa->xa_lock#7 ->inode_hash_lock ->&obj_hash[i].lock ->pool_lock#2 ->&fsnotify_mark_srcu ->sb_lock ->&lru->node[i].lock ->&rq->__lock FD: 755 BD: 2 +.+.: (work_completion)(&cgrp->bpf.release_work) ->cgroup_mutex ->percpu_ref_switch_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 760 BD: 1 +.+.: (wq_completion)cgroup_destroy ->(work_completion)(&css->destroy_work) ->(work_completion)(&(&css->destroy_rwork)->work) FD: 755 BD: 2 +.+.: (work_completion)(&css->destroy_work) ->cgroup_mutex ->&obj_hash[i].lock ->pool_lock#2 FD: 758 BD: 2 
+.+.: (work_completion)(&(&css->destroy_rwork)->work) ->percpu_ref_switch_lock ->&obj_hash[i].lock ->pool_lock#2 ->&cgrp->pidlist_mutex ->(wq_completion)cgroup_pidlist_destroy ->&wq->mutex ->(work_completion)(&cgrp->release_agent_work) ->cgroup_mutex ->cgroup_rstat_lock ->pcpu_lock ->&root->kernfs_rwsem ->kernfs_idr_lock ->quarantine_lock FD: 1 BD: 3 +.+.: &cgrp->pidlist_mutex FD: 1 BD: 3 +.+.: (wq_completion)cgroup_pidlist_destroy FD: 1 BD: 3 +.+.: (work_completion)(&cgrp->release_agent_work) FD: 789 BD: 1 .+.+: sb_writers#11 ->mount_lock ->&type->i_mutex_dir_key#7 ->fs_reclaim ->&mm->mmap_lock ->&of->mutex ->&obj_hash[i].lock ->&type->i_mutex_dir_key#7/1 ->&c->lock FD: 76 BD: 2 ++++: &type->i_mutex_dir_key#7 ->tomoyo_ss ->tk_core.seq.seqcount ->&root->kernfs_iattr_rwsem ->rename_lock.seqcount ->fs_reclaim ->&dentry->d_lock ->&root->kernfs_rwsem ->&sb->s_type->i_lock_key#31 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->pool_lock#2 ->&xa->xa_lock#3 ->&obj_hash[i].lock ->stock_lock ->&n->list_lock FD: 1 BD: 16 +.+.: &dom->lock FD: 80 BD: 1 .+.+: kn->active#49 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] FD: 1 BD: 1 +.+.: &sb->s_type->i_mutex_key#16 FD: 267 BD: 1 .+.+: kn->active#50 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->cpu_hotplug_lock FD: 43 BD: 3 +.+.: &type->s_umount_key#44 ->sb_lock ->&dentry->d_lock FD: 109 BD: 2 +.+.: &sb->s_type->i_mutex_key#17 ->namespace_sem ->rename_lock.seqcount ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->&c->lock ->&____s->seqcount ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#26 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->pin_fs_lock ->sb_lock ->&type->s_umount_key#44 ->mnt_id_ida.xa_lock ->pcpu_alloc_mutex ->mount_lock ->&obj_hash[i].lock ->entries_lock FD: 219 BD: 1 .+.+: sb_writers#12 ->fs_reclaim ->pool_lock#2 ->&mm->mmap_lock ->&sb->s_type->i_mutex_key#17 FD: 1 BD: 67 +...: &pn->hash_lock FD: 46 BD: 1 +...: &net->ipv6.fib6_gc_lock ->&obj_hash[i].lock FD: 1 BD: 65 +...: _xmit_IEEE802154 FD: 33 BD: 3 ..-.: &ei->i_completed_io_lock FD: 155 BD: 1 +.+.: (wq_completion)ext4-rsv-conversion ->(work_completion)(&ei->i_rsv_conversion_work) FD: 154 BD: 2 +.+.: (work_completion)(&ei->i_rsv_conversion_work) ->&ei->i_completed_io_lock ->&journal->j_state_lock ->jbd2_handle ->&obj_hash[i].lock ->pool_lock#2 ->&ext4__ioend_wq[i] ->&ret->b_uptodate_lock ->&folio_wait_table[i] ->rcu_node_0 ->&rq->__lock ->mmu_notifier_invalidate_range_start ->&rcu_state.expedited_wq ->&c->lock ->&lruvec->lru_lock FD: 1 BD: 117 ....: &journal->j_wait_reserved FD: 1 BD: 3 ....: &ext4__ioend_wq[i] FD: 77 BD: 8 +.+.: swap_cgroup_mutex ->fs_reclaim ->&____s->seqcount ->pool_lock#2 FD: 86 BD: 8 +.+.: swapon_mutex ->fs_reclaim ->pool_lock#2 ->swap_lock ->percpu_ref_switch_lock ->(console_sem).lock FD: 10 BD: 4476 +.+.: &p->lock#2 ->swap_avail_lock ->&ctrl->lock ->&tree->lock ->&xa->xa_lock#19 FD: 1 BD: 4477 +.+.: swap_avail_lock FD: 1 BD: 8 ....: proc_poll_wait.lock FD: 267 BD: 1 +.+.: swap_slots_cache_enable_mutex ->cpu_hotplug_lock ->swap_lock FD: 1 BD: 110 +.+.: swap_slots_cache_mutex FD: 86 BD: 117 +.+.: &lg->lg_mutex ->&ei->i_prealloc_lock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&mapping->private_lock ->&ret->b_state_lock ->&pa->pa_lock ->&lg->lg_prealloc_lock ->&rq->__lock ->&c->lock FD: 1 BD: 118 +.+.: &pa->pa_lock FD: 1 BD: 118 +.+.: &lg->lg_prealloc_lock FD: 29 BD: 117 ..-.: &rq_wait->wait ->&p->pi_lock FD: 32 BD: 1 +.-.: (&timer) ->&obj_hash[i].lock ->&base->lock ->&txlock ->&txwq FD: 1 BD: 4017 ..-.: 
&list->lock#5 FD: 5 BD: 66 +...: _xmit_SLIP#2 ->&eql->queue.lock FD: 35 BD: 68 +...: _xmit_NETROM ->(console_sem).lock ->console_owner_lock ->console_owner ->&obj_hash[i].lock ->pool_lock#2 FD: 5 BD: 1 +...: _xmit_X25#2 ->&lapbeth->up_lock FD: 65 BD: 1 +.-.: (&n->timer) ->&n->lock ->pool_lock#2 ->&dir->lock#2 ->&c->lock ->&ul->lock#2 ->&obj_hash[i].lock ->icmp_global.lock ->&n->list_lock ->nl_table_lock ->nl_table_wait.lock ->&dir->lock ->stock_lock FD: 31 BD: 1 ..-.: net/wireless/reg.c:236 FD: 745 BD: 2 +.+.: (reg_check_chans).work ->&rq->__lock ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock FD: 31 BD: 1 ..-.: net/wireless/reg.c:533 FD: 745 BD: 2 +.+.: (crda_timeout).work ->rtnl_mutex FD: 82 BD: 1 +.-.: (&sk->sk_timer) ->slock-AF_INET FD: 1 BD: 66 ..-.: &list->lock#6 FD: 1 BD: 66 ..-.: &list->lock#7 FD: 1 BD: 1 ..-.: &list->lock#8 FD: 1 BD: 4 +.-.: x25_list_lock FD: 1 BD: 1 +.-.: x25_forward_list_lock FD: 1 BD: 159 ....: &newf->resize_wait FD: 3 BD: 121 ....: &kcov->lock ->kcov_remote_lock FD: 125 BD: 1 +.+.: pid_caches_mutex ->slab_mutex FD: 43 BD: 1 +.+.: &type->s_umount_key#45 ->sb_lock ->&dentry->d_lock FD: 115 BD: 1 ++++: &sb->s_type->i_mutex_key#18 ->namespace_sem ->&dentry->d_lock ->tk_core.seq.seqcount FD: 1 BD: 23 ++++: hci_sk_list.lock FD: 1 BD: 1 +.+.: (work_completion)(&(&data->open_timeout)->work) FD: 292 BD: 1 +.+.: &data->open_mutex ->fs_reclaim ->pool_lock#2 ->&____s->seqcount ->&obj_hash[i].lock ->&x->wait#9 ->hci_index_ida.xa_lock ->cpu_hotplug_lock ->wq_pool_mutex ->&c->lock ->&n->list_lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->uevent_sock_mutex.wait_lock ->&p->pi_lock ->subsys mutex#80 ->&dev->devres_lock ->triggers_list_lock ->leds_list_lock ->rfkill_global_mutex ->rfkill_global_mutex.wait_lock ->&rfkill->lock ->hci_dev_list_lock ->tk_core.seq.seqcount ->hci_sk_list.lock ->(pm_chain_head).rwsem ->&rq->__lock ->&list->lock#11 ->&data->read_wait FD: 1 BD: 2 ....: hci_index_ida.xa_lock FD: 1 BD: 192 +.+.: uevent_sock_mutex.wait_lock FD: 31 BD: 22 +.+.: subsys mutex#80 ->&k->k_lock ->&rq->__lock ->&lock->wait_lock FD: 1 BD: 14 ++++: hci_dev_list_lock FD: 195 BD: 1 +.+.: (wq_completion)hci1 ->(work_completion)(&hdev->power_on) ->(work_completion)(&hdev->cmd_sync_work) FD: 193 BD: 10 +.+.: (work_completion)(&hdev->power_on) ->&hdev->req_lock ->fs_reclaim ->pool_lock#2 ->tk_core.seq.seqcount ->hci_sk_list.lock ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount FD: 192 BD: 12 +.+.: &hdev->req_lock ->&obj_hash[i].lock ->&list->lock#9 ->pool_lock#2 ->&list->lock#10 ->&hdev->req_wait_q ->&base->lock ->&rq->__lock ->(&timer.timer) ->&c->lock ->&n->list_lock ->&____s->seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock ->tk_core.seq.seqcount ->hci_sk_list.lock ->&cfs_rq->removed.lock ->(work_completion)(&(&hdev->interleave_scan)->work) ->hci_dev_list_lock ->(work_completion)(&hdev->tx_work) ->(work_completion)(&hdev->rx_work) ->&wq->mutex ->(wq_completion)hci0#2 ->&hdev->lock ->&lock->wait_lock ->&list->lock#11 ->(work_completion)(&hdev->cmd_work) ->(work_completion)(&(&hdev->cmd_timer)->work) FD: 1 BD: 24 ....: &list->lock#9 FD: 1 BD: 13 ....: &list->lock#10 FD: 29 BD: 20 ....: &hdev->req_wait_q ->&p->pi_lock FD: 1 BD: 22 ....: &list->lock#11 FD: 29 BD: 22 ....: &data->read_wait ->&p->pi_lock FD: 207 BD: 3 +.+.: sk_lock-AF_BLUETOOTH-BTPROTO_HCI ->slock-AF_BLUETOOTH-BTPROTO_HCI ->sock_cookie_ida.xa_lock 
->&p->alloc_lock ->pool_lock#2 ->tk_core.seq.seqcount ->hci_sk_list.lock ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->clock-AF_BLUETOOTH ->&____s->seqcount ->mgmt_chan_list_lock ->&rq->__lock ->hci_dev_list_lock ->fs_reclaim ->(wq_completion)hci0 ->&wq->mutex ->&hdev->req_lock ->remove_cache_srcu FD: 1 BD: 4 +...: slock-AF_BLUETOOTH-BTPROTO_HCI FD: 1 BD: 4 ....: sock_cookie_ida.xa_lock FD: 195 BD: 4 +.+.: (wq_completion)hci0 ->(work_completion)(&hdev->power_on) ->(work_completion)(&hdev->cmd_sync_work) FD: 195 BD: 1 +.+.: (wq_completion)hci2 ->(work_completion)(&hdev->power_on) ->(work_completion)(&hdev->cmd_sync_work) FD: 195 BD: 1 +.+.: (wq_completion)hci3 ->(work_completion)(&hdev->power_on) ->(work_completion)(&hdev->cmd_sync_work) FD: 195 BD: 1 +.+.: (wq_completion)hci4 ->(work_completion)(&hdev->power_on) ->(work_completion)(&hdev->cmd_sync_work) FD: 186 BD: 1 +.+.: (wq_completion)hci1#2 ->(work_completion)(&hdev->cmd_work) ->(work_completion)(&hdev->rx_work) ->(work_completion)(&hdev->tx_work) ->(work_completion)(&conn->pending_rx_work) ->(work_completion)(&(&hdev->cmd_timer)->work) ->(work_completion)(&(&conn->disc_work)->work) FD: 80 BD: 19 +.+.: (work_completion)(&hdev->cmd_work) ->&list->lock#9 ->fs_reclaim ->pool_lock#2 ->tk_core.seq.seqcount ->&list->lock#11 ->&data->read_wait ->&obj_hash[i].lock ->&c->lock ->&rq->__lock ->&n->list_lock ->&____s->seqcount FD: 186 BD: 13 +.+.: (wq_completion)hci0#2 ->(work_completion)(&hdev->cmd_work) ->(work_completion)(&hdev->rx_work) ->(work_completion)(&hdev->tx_work) ->(work_completion)(&conn->pending_rx_work) ->(work_completion)(&(&hdev->cmd_timer)->work) ->(work_completion)(&(&conn->disc_work)->work) FD: 179 BD: 19 +.+.: (work_completion)(&hdev->rx_work) ->&list->lock#9 ->lock#6 ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->init_mm.page_table_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&hdev->lock ->(console_sem).lock ->console_owner_lock ->console_owner ->&rq->__lock ->&obj_hash[i].lock ->&hdev->req_wait_q ->&c->lock ->&base->lock ->&n->list_lock ->chan_list_lock FD: 171 BD: 20 +.+.: &hdev->lock ->fs_reclaim ->&c->lock ->pool_lock#2 ->&obj_hash[i].lock ->&x->wait#9 ->&k->list_lock ->&rq->__lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->&k->k_lock ->subsys mutex#80 ->&____s->seqcount ->&list->lock#9 ->&hdev->unregister_lock ->hci_cb_list_lock ->&base->lock ->tk_core.seq.seqcount ->hci_sk_list.lock ->&n->list_lock ->(work_completion)(&(&conn->disc_work)->work) ->(work_completion)(&(&conn->auto_accept_work)->work) ->(work_completion)(&(&conn->idle_work)->work) ->&list->lock#12 ->dev_pm_qos_sysfs_mtx ->&sem->wait_lock ->&p->pi_lock ->kernfs_idr_lock ->deferred_probe_mutex ->device_links_lock ->mmu_notifier_invalidate_range_start FD: 186 BD: 1 +.+.: (wq_completion)hci2#2 ->(work_completion)(&hdev->cmd_work) ->(work_completion)(&hdev->rx_work) ->(work_completion)(&hdev->tx_work) ->(work_completion)(&conn->pending_rx_work) ->(work_completion)(&(&hdev->cmd_timer)->work) ->(work_completion)(&(&conn->disc_work)->work) FD: 195 BD: 1 +.+.: (wq_completion)hci5 ->(work_completion)(&hdev->power_on) ->(work_completion)(&hdev->cmd_sync_work) FD: 186 BD: 1 +.+.: (wq_completion)hci3#2 ->(work_completion)(&hdev->cmd_work) ->(work_completion)(&hdev->rx_work) ->(work_completion)(&hdev->tx_work) ->(work_completion)(&conn->pending_rx_work) ->(work_completion)(&(&hdev->cmd_timer)->work) 
->(work_completion)(&(&conn->disc_work)->work) FD: 186 BD: 1 +.+.: (wq_completion)hci4#2 ->(work_completion)(&hdev->cmd_work) ->(work_completion)(&hdev->rx_work) ->(work_completion)(&hdev->tx_work) ->(work_completion)(&conn->pending_rx_work) ->(work_completion)(&(&hdev->cmd_timer)->work) ->(work_completion)(&(&conn->disc_work)->work) FD: 186 BD: 1 +.+.: (wq_completion)hci5#2 ->(work_completion)(&hdev->cmd_work) ->(work_completion)(&hdev->rx_work) ->(work_completion)(&hdev->tx_work) ->(work_completion)(&conn->pending_rx_work) ->(work_completion)(&(&hdev->cmd_timer)->work) ->(work_completion)(&(&conn->disc_work)->work) FD: 1 BD: 5 +...: clock-AF_BLUETOOTH FD: 1 BD: 4 ....: rlock-AF_BLUETOOTH FD: 1 BD: 4 ....: wlock-AF_BLUETOOTH FD: 1 BD: 1 +.+.: &sb->s_type->i_mutex_key#19 FD: 78 BD: 21 +.+.: &hdev->unregister_lock ->fs_reclaim ->pool_lock#2 ->&hdev->cmd_sync_work_lock ->&rq->__lock FD: 1 BD: 22 +.+.: &hdev->cmd_sync_work_lock FD: 193 BD: 10 +.+.: (work_completion)(&hdev->cmd_sync_work) ->&hdev->cmd_sync_work_lock ->&hdev->req_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 22 +.+.: &conn->ident_lock FD: 1 BD: 23 ....: &list->lock#12 FD: 28 BD: 24 +.+.: &conn->chan_lock ->&rq->__lock FD: 33 BD: 19 +.+.: (work_completion)(&hdev->tx_work) ->&list->lock#12 ->tk_core.seq.seqcount ->&list->lock#11 ->&data->read_wait ->&list->lock#9 FD: 2 BD: 19 +.+.: (work_completion)(&conn->pending_rx_work) ->&list->lock#13 FD: 1 BD: 23 ....: &list->lock#13 FD: 1 BD: 1 +.+.: &undo_list->lock FD: 1 BD: 64 +...: &nr_netdev_addr_lock_key FD: 1 BD: 64 +...: listen_lock FD: 2 BD: 12 +.+.: rdma_nets.xa_lock ->pool_lock#2 FD: 1 BD: 4 +.+.: &____s->seqcount#11 FD: 2 BD: 3 +.+.: &(&net->ipv4.ping_group_range.lock)->lock ->&____s->seqcount#11 FD: 2 BD: 64 +.+.: &r->consumer_lock ->&r->producer_lock FD: 1 BD: 3891 +.-.: &r->producer_lock FD: 19 BD: 3892 +...: &bridge_netdev_addr_lock_key ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&n->list_lock ->&obj_hash[i].lock ->krc.lock ->&____s->seqcount#2 ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 37 BD: 69 +.-.: &br->hash_lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->nl_table_lock ->&obj_hash[i].lock ->nl_table_wait.lock ->&n->list_lock ->&____s->seqcount#2 ->quarantine_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock FD: 1 BD: 73 +.+.: nf_hook_mutex.wait_lock FD: 1 BD: 64 +.+.: j1939_netdev_lock FD: 2 BD: 3888 +...: &dev_addr_list_lock_key#2 ->pool_lock#2 FD: 7 BD: 64 +...: &bat_priv->tvlv.handler_list_lock ->pool_lock#2 ->&c->lock FD: 14 BD: 71 +...: &bat_priv->tvlv.container_list_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount#2 ->&n->list_lock ->&____s->seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->quarantine_lock FD: 2 BD: 3888 +...: &batadv_netdev_addr_lock_key ->pool_lock#2 FD: 10 BD: 69 +...: &bat_priv->softif_vlan_list_lock ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&____s->seqcount#2 ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 1 BD: 72 +...: key#16 FD: 4 BD: 71 +...: &bat_priv->tt.changes_list_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 33 BD: 1 ..-.: &(&bat_priv->nc.work)->timer FD: 62 BD: 1 +.+.: (wq_completion)bat_events ->(work_completion)(&(&bat_priv->nc.work)->work) ->(work_completion)(&(&bat_priv->mcast.work)->work) ->(work_completion)(&(&bat_priv->orig_work)->work) ->(work_completion)(&(&forw_packet_aggr->delayed_work)->work) ->(work_completion)(&(&bat_priv->tt.work)->work) ->(work_completion)(&(&bat_priv->dat.work)->work) 
->(work_completion)(&(&bat_priv->bla.work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 34 BD: 2 +.+.: (work_completion)(&(&bat_priv->nc.work)->work) ->key#17 ->key#18 ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq ->&rq->__lock ->&cfs_rq->removed.lock FD: 1 BD: 3 +...: key#17 FD: 1 BD: 3 +...: key#18 FD: 125 BD: 65 +.+.: init_lock ->slab_mutex ->fs_reclaim ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock ->&base->lock ->crngs.lock FD: 1 BD: 3900 +.-.: deferred_lock FD: 745 BD: 2 +.+.: deferred_process_work ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&rq->__lock FD: 1 BD: 65 ....: target_list_lock FD: 48 BD: 66 +.-.: &br->lock ->&br->hash_lock ->lweventlist_lock ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->&dir->lock#2 ->deferred_lock ->(console_sem).lock ->&c->lock ->nl_table_lock ->nl_table_wait.lock ->&br->multicast_lock ->&n->list_lock ->&____s->seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock FD: 33 BD: 1 ..-.: &(&bat_priv->mcast.work)->timer FD: 38 BD: 2 +.+.: (work_completion)(&(&bat_priv->mcast.work)->work) ->pool_lock#2 ->&bat_priv->mcast.mla_lock ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->rcu_node_0 ->kfence_freelist_lock ->&meta->lock ->quarantine_lock ->&cfs_rq->removed.lock FD: 17 BD: 3 +.+.: &bat_priv->mcast.mla_lock ->pool_lock#2 ->key#16 ->&____s->seqcount ->&obj_hash[i].lock ->&bat_priv->tt.changes_list_lock ->&bat_priv->tvlv.container_list_lock FD: 1 BD: 3876 +.+.: &bond->stats_lock/1 FD: 101 BD: 1 +.+.: (wq_completion)bond0 ->(work_completion)(&(&slave->notify_work)->work) FD: 100 BD: 3929 +.+.: (work_completion)(&(&slave->notify_work)->work) ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->rtnl_mutex.wait_lock ->&p->pi_lock ->pool_lock#2 ->&____s->seqcount ->rcu_node_0 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock ->stock_lock FD: 33 BD: 1 ..-.: &(&slave->notify_work)->timer FD: 101 BD: 1 +.+.: (wq_completion)bond0#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond0#3 ->(work_completion)(&(&slave->notify_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond0#4 ->(work_completion)(&(&slave->notify_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond0#5 ->(work_completion)(&(&slave->notify_work)->work) FD: 158 BD: 65 +.+.: team->team_lock_key ->fs_reclaim ->pool_lock#2 ->netpoll_srcu ->net_rwsem ->&tn->lock ->_xmit_ETHER ->&dir->lock#2 ->&c->lock ->input_pool.lock ->&ndev->lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&in_dev->mc_tomb_lock ->&im->lock ->cbs_list_lock ->sysfs_symlink_target_lock ->lock ->&root->kernfs_rwsem ->remove_cache_srcu ->&____s->seqcount ->(console_sem).lock ->&rq->__lock ->lweventlist_lock ->&____s->seqcount#2 ->&n->list_lock FD: 158 BD: 65 +.+.: team->team_lock_key#2 ->fs_reclaim ->&c->lock ->netpoll_srcu ->net_rwsem ->&tn->lock ->_xmit_ETHER ->&dir->lock#2 ->&____s->seqcount ->pool_lock#2 ->input_pool.lock ->&ndev->lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&in_dev->mc_tomb_lock ->&im->lock ->cbs_list_lock ->sysfs_symlink_target_lock ->lock ->&root->kernfs_rwsem ->lweventlist_lock ->(console_sem).lock ->&rq->__lock FD: 101 BD: 1 +.+.: (wq_completion)bond0#6 ->(work_completion)(&(&slave->notify_work)->work) FD: 158 BD: 65 +.+.: team->team_lock_key#3 ->fs_reclaim ->netpoll_srcu ->net_rwsem ->&tn->lock ->_xmit_ETHER ->&dir->lock#2 ->input_pool.lock ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&ndev->lock ->&obj_hash[i].lock ->nl_table_lock 
->nl_table_wait.lock ->&in_dev->mc_tomb_lock ->&im->lock ->cbs_list_lock ->sysfs_symlink_target_lock ->lock ->&root->kernfs_rwsem ->lweventlist_lock ->(console_sem).lock ->quarantine_lock ->remove_cache_srcu ->&n->list_lock ->&rq->__lock FD: 33 BD: 1 ..-.: &(&bat_priv->orig_work)->timer FD: 32 BD: 2 +.+.: (work_completion)(&(&bat_priv->orig_work)->work) ->key#19 ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock ->pool_lock#2 FD: 1 BD: 3 +...: key#19 FD: 158 BD: 65 +.+.: team->team_lock_key#4 ->fs_reclaim ->netpoll_srcu ->net_rwsem ->&tn->lock ->_xmit_ETHER ->&dir->lock#2 ->input_pool.lock ->&ndev->lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&in_dev->mc_tomb_lock ->&im->lock ->cbs_list_lock ->sysfs_symlink_target_lock ->lock ->&root->kernfs_rwsem ->&c->lock ->&____s->seqcount ->lweventlist_lock ->(console_sem).lock ->&rq->__lock FD: 158 BD: 65 +.+.: team->team_lock_key#5 ->fs_reclaim ->netpoll_srcu ->net_rwsem ->&tn->lock ->_xmit_ETHER ->&dir->lock#2 ->input_pool.lock ->&c->lock ->&____s->seqcount ->&ndev->lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&in_dev->mc_tomb_lock ->&im->lock ->cbs_list_lock ->&n->list_lock ->sysfs_symlink_target_lock ->lock ->&root->kernfs_rwsem ->lweventlist_lock ->(console_sem).lock ->pool_lock#2 FD: 41 BD: 67 +.+.: &hard_iface->bat_iv.ogm_buff_mutex ->crngs.lock ->pool_lock#2 ->batched_entropy_u8.lock ->&bat_priv->forw_bat_list_lock ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq ->kfence_freelist_lock ->&n->list_lock ->&bat_priv->tt.commit_lock ->&bat_priv->tvlv.container_list_lock ->&____s->seqcount#2 ->&cfs_rq->removed.lock FD: 14 BD: 68 +...: &bat_priv->forw_bat_list_lock ->&obj_hash[i].lock ->&base->lock FD: 31 BD: 1 ..-.: drivers/net/wireguard/ratelimiter.c:20 FD: 32 BD: 2 +.+.: (gc_work).work ->tk_core.seq.seqcount ->"ratelimiter_table_lock" ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 1 BD: 3 +.+.: "ratelimiter_table_lock" FD: 158 BD: 65 +.+.: team->team_lock_key#6 ->fs_reclaim ->netpoll_srcu ->net_rwsem ->&tn->lock ->_xmit_ETHER ->&dir->lock#2 ->input_pool.lock ->&ndev->lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&in_dev->mc_tomb_lock ->&im->lock ->cbs_list_lock ->sysfs_symlink_target_lock ->lock ->&root->kernfs_rwsem ->&c->lock ->&____s->seqcount ->pool_lock#2 ->lweventlist_lock ->(console_sem).lock ->&rq->__lock ->&n->list_lock FD: 33 BD: 1 ..-.: &(&hdev->cmd_timer)->timer FD: 41 BD: 19 +.+.: (work_completion)(&(&hdev->cmd_timer)->work) ->(console_sem).lock ->console_owner_lock ->console_owner ->rcu_node_0 ->&rcu_state.expedited_wq ->&rq->__lock FD: 1 BD: 64 +...: _xmit_NONE FD: 1 BD: 64 +...: lock#9 FD: 1 BD: 65 ...-: &____s->seqcount#12 FD: 1 BD: 3892 +.-.: &hsr->list_lock FD: 8 BD: 3888 +...: &vlan_netdev_addr_lock_key ->pool_lock#2 ->&c->lock ->&____s->seqcount FD: 8 BD: 3888 +...: &macvlan_netdev_addr_lock_key ->pool_lock#2 ->&c->lock ->&____s->seqcount FD: 17 BD: 64 +.-.: (&app->join_timer) ->&app->lock ->&list->lock#14 ->batched_entropy_u32.lock ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 65 +.-.: &app->lock FD: 1 BD: 65 ..-.: &list->lock#14 FD: 33 BD: 1 ..-.: &(&forw_packet_aggr->delayed_work)->timer FD: 45 BD: 2 +.+.: (work_completion)(&(&forw_packet_aggr->delayed_work)->work) ->&hard_iface->bat_iv.ogm_buff_mutex ->&bat_priv->forw_bat_list_lock ->&obj_hash[i].lock ->pool_lock#2 
->&meta->lock ->kfence_freelist_lock ->&rq->__lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->batched_entropy_u8.lock ->&n->list_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->quarantine_lock ->&cfs_rq->removed.lock FD: 17 BD: 64 +.-.: (&app->join_timer)#2 ->&app->lock#2 ->&list->lock#15 FD: 15 BD: 66 +.-.: &app->lock#2 ->batched_entropy_u32.lock ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 65 ..-.: &list->lock#15 FD: 7 BD: 3888 +...: &dev_addr_list_lock_key#3 ->pool_lock#2 ->&c->lock FD: 1 BD: 64 ....: &xa->xa_lock#13 FD: 1 BD: 3888 +...: &dev_addr_list_lock_key#3/1 FD: 2 BD: 64 +.+.: &tap_major->minor_lock ->pool_lock#2 FD: 3 BD: 64 +.+.: subsys mutex#81 ->&k->k_lock FD: 771 BD: 1 .+.+: kn->active#51 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->nsim_bus_dev_list_lock ->&c->lock ->nsim_bus_dev_list_lock.wait_lock ->&p->pi_lock FD: 769 BD: 9 +.+.: nsim_bus_dev_list_lock ->fs_reclaim ->&c->lock ->&____s->seqcount ->pool_lock#2 ->nsim_bus_dev_ids.xa_lock ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&k->k_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->nsim_bus_dev_list_lock.wait_lock ->&rq->__lock ->subsys mutex#82 FD: 771 BD: 1 .+.+: kn->active#52 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->nsim_bus_dev_list_lock ->nsim_bus_dev_list_lock.wait_lock ->&p->pi_lock ->&rq->__lock ->&c->lock ->&____s->seqcount FD: 1 BD: 10 ....: nsim_bus_dev_ids.xa_lock FD: 2 BD: 18 +.+.: devlinks.xa_lock ->pool_lock#2 FD: 750 BD: 12 +.+.: &devlink->lock_key ->crngs.lock ->fs_reclaim ->pool_lock#2 ->devlinks.xa_lock ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&xa->xa_lock#14 ->pcpu_alloc_mutex ->&base->lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->batched_entropy_u32.lock ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&(&fn_net->fib_chain)->lock ->&devlink_port->type_lock ->stack_depot_init_mutex ->&rq->__lock ->&nsim_trap_data->trap_lock ->rcu_node_0 ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq FD: 9 BD: 18 +.+.: &xa->xa_lock#14 ->&c->lock ->pool_lock#2 ->&obj_hash[i].lock FD: 1 BD: 3992 +...: &data->fib_event_queue_lock FD: 1 BD: 18 ....: &(&fn_net->fib_chain)->lock FD: 82 BD: 14 +.+.: (work_completion)(&data->fib_event_work) ->&data->fib_event_queue_lock ->&data->fib_lock ->&rq->__lock FD: 80 BD: 15 +.+.: &data->fib_lock ->fs_reclaim ->pool_lock#2 ->&c->lock ->&____s->seqcount ->pool_lock ->&obj_hash[i].lock ->&base->lock ->&pool->lock ->&rq->__lock ->(&timer.timer) ->batched_entropy_u8.lock ->kfence_freelist_lock ->&n->list_lock ->&____s->seqcount#2 ->&cfs_rq->removed.lock ->remove_cache_srcu FD: 1 BD: 64 +...: &devlink_port->type_lock FD: 79 BD: 65 +.+.: bpf_devs_lock ->fs_reclaim ->pool_lock#2 ->&rq->__lock ->&obj_hash[i].lock ->&c->lock FD: 1 BD: 64 +.+.: (work_completion)(&(&devlink_port->type_warn_dw)->work) FD: 1 BD: 64 +.+.: &vn->sock_lock FD: 1 BD: 10 +.+.: nsim_bus_dev_list_lock.wait_lock FD: 31 BD: 1 ..-.: &(&nsim_dev->trap_data->trap_report_dw)->timer FD: 37 BD: 14 +.+.: (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&obj_hash[i].lock ->&base->lock ->&cfs_rq->removed.lock FD: 1 BD: 10 +.+.: subsys mutex#82 FD: 750 BD: 12 +.+.: &devlink->lock_key#2 ->crngs.lock ->fs_reclaim ->devlinks.xa_lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&n->list_lock ->&xa->xa_lock#14 ->pcpu_alloc_mutex 
->&base->lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->batched_entropy_u32.lock ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&(&fn_net->fib_chain)->lock ->&devlink_port->type_lock ->stack_depot_init_mutex ->&rq->__lock ->&nsim_trap_data->trap_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 31 BD: 1 ..-.: &(&hwstats->traffic_dw)->timer FD: 29 BD: 14 +.+.: (work_completion)(&(&hwstats->traffic_dw)->work) ->&hwstats->hwsdev_list_lock ->&obj_hash[i].lock ->&base->lock ->&rq->__lock FD: 28 BD: 65 +.+.: &hwstats->hwsdev_list_lock ->&rq->__lock FD: 120 BD: 64 +.+.: devnet_rename_sem ->(console_sem).lock ->fs_reclaim ->pool_lock#2 ->&k->list_lock ->&root->kernfs_rwsem ->kernfs_rename_lock ->&c->lock ->uevent_sock_mutex ->&obj_hash[i].lock ->&rq->__lock ->&____s->seqcount ->&n->list_lock FD: 1 BD: 219 ....: kernfs_rename_lock FD: 28 BD: 66 +.+.: &nft_net->commit_mutex ->&rq->__lock FD: 749 BD: 12 +.+.: &devlink->lock_key#3 ->crngs.lock ->fs_reclaim ->devlinks.xa_lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&xa->xa_lock#14 ->&c->lock ->&____s->seqcount ->pool_lock#2 ->pcpu_alloc_mutex ->quarantine_lock ->remove_cache_srcu ->&base->lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->batched_entropy_u32.lock ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&(&fn_net->fib_chain)->lock ->&devlink_port->type_lock ->stack_depot_init_mutex ->&rq->__lock ->&nsim_trap_data->trap_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 32 BD: 20 +.+.: &nsim_trap_data->trap_lock ->&c->lock ->pool_lock#2 ->crngs.lock ->&nsim_dev->fa_cookie_lock ->&obj_hash[i].lock ->&____s->seqcount ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->quarantine_lock ->&____s->seqcount#2 ->&base->lock FD: 1 BD: 21 +...: &nsim_dev->fa_cookie_lock FD: 350 BD: 64 +.+.: &wg->device_update_lock ->&wg->static_identity.lock ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&c->lock ->pcpu_alloc_mutex ->&handshake->lock ->&obj_hash[i].lock ->tk_core.seq.seqcount ->&table->lock ->&peer->endpoint_lock ->&rq->__lock ->&n->list_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#8 ->&dir->lock ->k-sk_lock-AF_INET ->k-slock-AF_INET ->cpu_hotplug_lock ->k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->&wg->socket_update_lock ->&list->lock#17 ->&pool->lock/1 ->&____s->seqcount#2 ->&wq->mutex ->wq_pool_mutex ->wq_mayday_lock ->&p->pi_lock ->&x->wait ->pcpu_lock ->&r->consumer_lock#2 ->rcu_state.barrier_mutex ->rcu_state.barrier_mutex.wait_lock ->init_lock ->&zone->lock ->rcu_state.exp_mutex.wait_lock FD: 79 BD: 121 ++++: &wg->static_identity.lock ->&handshake->lock ->&rq->__lock FD: 78 BD: 123 ++++: &handshake->lock ->&rq->__lock ->crngs.lock ->tk_core.seq.seqcount ->&table->lock#2 ->fs_reclaim ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&____s->seqcount#2 ->&n->list_lock ->remove_cache_srcu ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 65 +.+.: &table->lock FD: 56 BD: 124 ++-.: &peer->endpoint_lock ->pool_lock#2 ->&obj_hash[i].lock FD: 757 BD: 12 +.+.: &devlink->lock_key#4 ->crngs.lock ->fs_reclaim ->devlinks.xa_lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&xa->xa_lock#14 ->pcpu_alloc_mutex ->&n->list_lock ->quarantine_lock ->remove_cache_srcu ->&base->lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->batched_entropy_u32.lock ->&rq->__lock ->rtnl_mutex ->rtnl_mutex.wait_lock 
->&p->pi_lock ->rcu_node_0 ->&(&fn_net->fib_chain)->lock ->&devlink_port->type_lock ->stack_depot_init_mutex ->&nsim_trap_data->trap_lock ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock ->&dentry->d_lock ->&fsnotify_mark_srcu ->&sb->s_type->i_lock_key#7 ->&s->s_inode_list_lock ->&xa->xa_lock#7 ->mount_lock ->rcu_state.barrier_mutex ->dev_base_lock ->lweventlist_lock ->netdev_unregistering_wq.lock ->krc.lock ->&dir->lock#2 ->(work_completion)(&(&devlink_port->type_warn_dw)->work) ->(work_completion)(&(&hwstats->traffic_dw)->work) ->&hwstats->hwsdev_list_lock ->(work_completion)(&data->fib_flush_work) ->(work_completion)(&data->fib_event_work) ->(work_completion)(&ht->run_work) ->&ht->mutex ->(work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) ->&x->wait#10 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->pcpu_lock ->&region->snapshot_lock ->&____s->seqcount#2 ->stock_lock FD: 1 BD: 4 +.+.: genl_mutex.wait_lock FD: 1 BD: 64 +...: _xmit_SIT FD: 8 BD: 3888 +...: &bridge_netdev_addr_lock_key/1 ->pool_lock#2 ->&c->lock ->&____s->seqcount FD: 40 BD: 64 +.-.: (&brmctx->ip6_own_query.timer) ->&br->multicast_lock FD: 39 BD: 3899 +.-.: &br->multicast_lock ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->&dir->lock#2 ->deferred_lock ->nl_table_lock ->nl_table_wait.lock ->&c->lock ->&____s->seqcount ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&____s->seqcount#2 ->quarantine_lock ->init_task.mems_allowed_seq.seqcount FD: 40 BD: 64 +.-.: (&brmctx->ip4_own_query.timer) ->&br->multicast_lock FD: 1 BD: 64 +...: _xmit_TUNNEL FD: 8 BD: 3888 +...: _xmit_IPGRE ->pool_lock#2 ->&c->lock ->&n->list_lock FD: 58 BD: 1 +.-.: (&in_dev->mr_ifc_timer) ->&obj_hash[i].lock ->batched_entropy_u32.lock ->&base->lock FD: 1 BD: 64 +...: _xmit_TUNNEL6 FD: 51 BD: 65 +.-.: _xmit_TUNNEL6#2 ->&obj_hash[i].lock ->pool_lock#2 ->&____s->seqcount FD: 31 BD: 1 ..-.: &(&br->gc_work)->timer FD: 39 BD: 65 +.+.: (work_completion)(&(&br->gc_work)->work) ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->&cfs_rq->removed.lock FD: 750 BD: 12 +.+.: &devlink->lock_key#5 ->crngs.lock ->fs_reclaim ->devlinks.xa_lock ->&c->lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&xa->xa_lock#14 ->&____s->seqcount ->pool_lock#2 ->&n->list_lock ->pcpu_alloc_mutex ->&base->lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->batched_entropy_u32.lock ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&(&fn_net->fib_chain)->lock ->&devlink_port->type_lock ->stack_depot_init_mutex ->&rq->__lock ->&nsim_trap_data->trap_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 43 BD: 3890 +...: &dev_addr_list_lock_key/1 ->&c->lock ->&____s->seqcount ->&bridge_netdev_addr_lock_key ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock ->&n->list_lock ->&____s->seqcount#2 FD: 42 BD: 3888 +...: &dev_addr_list_lock_key#2/1 ->&c->lock ->&____s->seqcount FD: 16 BD: 64 +.-.: (&app->periodic_timer) ->&app->lock#2 FD: 750 BD: 12 +.+.: &devlink->lock_key#6 ->crngs.lock ->fs_reclaim ->devlinks.xa_lock ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&xa->xa_lock#14 ->&n->list_lock ->pcpu_alloc_mutex ->&base->lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->batched_entropy_u32.lock ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&(&fn_net->fib_chain)->lock ->&devlink_port->type_lock ->stack_depot_init_mutex ->&rq->__lock ->&nsim_trap_data->trap_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 40 BD: 1 +.-.:
(&pmctx->ip6_own_query.timer) ->&br->multicast_lock FD: 40 BD: 1 +.-.: (&pmctx->ip4_own_query.timer) ->&br->multicast_lock FD: 2 BD: 1 +.-.: (&tun->flow_gc_timer) ->&tun->lock FD: 1 BD: 65 +.-.: &tun->lock FD: 31 BD: 1 ..-.: &(&conn->info_timer)->timer FD: 29 BD: 23 +.+.: (work_completion)(&(&conn->info_timer)->work) ->&conn->chan_lock ->&rq->__lock FD: 7 BD: 3888 +...: _xmit_ETHER/1 ->&c->lock ->&____s->seqcount FD: 1 BD: 115 +.+.: wq_pool_attach_mutex.wait_lock FD: 24 BD: 64 +.-.: (&hsr->announce_timer) FD: 23 BD: 3890 +.-.: &hsr->seqnr_lock ->pool_lock#2 ->&obj_hash[i].lock ->&base->lock ->&meta->lock ->kfence_freelist_lock ->quarantine_lock FD: 1 BD: 3891 +.-.: &new_node->seq_out_lock FD: 1 BD: 64 +.+.: &nn->netlink_tap_lock FD: 7 BD: 3888 +...: &batadv_netdev_addr_lock_key/1 ->&c->lock ->&____s->seqcount FD: 43 BD: 3888 +...: &vlan_netdev_addr_lock_key/1 ->_xmit_ETHER ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&bridge_netdev_addr_lock_key ->&obj_hash[i].lock ->krc.lock FD: 42 BD: 3888 +...: &macvlan_netdev_addr_lock_key/1 ->_xmit_ETHER ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->pool_lock#2 FD: 1 BD: 3 +.-.: &list->lock#16 FD: 32 BD: 2 +.+.: (work_completion)(&port->bc_work) ->&list->lock#16 ->&obj_hash[i].lock ->pool_lock#2 ->quarantine_lock ->&rq->__lock FD: 19 BD: 65 +...: &ipvlan->addrs_lock ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->krc.lock FD: 42 BD: 3888 +...: &macsec_netdev_addr_lock_key/1 ->_xmit_ETHER ->&c->lock ->&____s->seqcount FD: 13 BD: 1 +...: dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 FD: 15 BD: 64 +.-.: (&hsr->prune_timer) ->&hsr->list_lock ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 3890 +...: key#20 FD: 33 BD: 1 ..-.: &(&bat_priv->tt.work)->timer FD: 35 BD: 2 +.+.: (work_completion)(&(&bat_priv->tt.work)->work) ->key#16 ->&rq->__lock ->key#21 ->&bat_priv->tt.req_list_lock ->&bat_priv->tt.roam_list_lock ->&obj_hash[i].lock ->&base->lock ->&cfs_rq->removed.lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq FD: 19 BD: 68 +...: &bat_priv->tt.commit_lock ->key#16 ->&bat_priv->softif_vlan_list_lock ->&bat_priv->tt.changes_list_lock ->&bat_priv->tt.last_changeset_lock ->pool_lock#2 ->&bat_priv->tvlv.container_list_lock ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount#2 ->&n->list_lock ->&____s->seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock FD: 1 BD: 3 +...: key#21 FD: 1 BD: 3 +...: &bat_priv->tt.req_list_lock FD: 1 BD: 3 +...: &bat_priv->tt.roam_list_lock FD: 1 BD: 3890 +...: &entry->crc_lock FD: 1 BD: 65 +.+.: &wg->socket_update_lock FD: 5 BD: 106 +.-.: &list->lock#17 ->&obj_hash[i].lock ->&____s->seqcount ->pool_lock#2 FD: 81 BD: 1 +.+.: (wq_completion)wg-kex-wg0 ->(work_completion)(&peer->transmit_handshake_work) ->(work_completion)(&peer->clear_peer_work) FD: 79 BD: 19 +.+.: (work_completion)(&peer->transmit_handshake_work) ->tk_core.seq.seqcount ->&wg->static_identity.lock ->&cookie->lock ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->&peer->endpoint_lock ->batched_entropy_u8.lock ->&c->lock ->&n->list_lock ->&rq->__lock ->kfence_freelist_lock ->&____s->seqcount#2 ->&____s->seqcount ->rcu_node_0 ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq FD: 1 BD: 125 +...: &table->lock#2 FD: 28 BD: 57 ++++: &cookie->lock ->&rq->__lock FD: 2 BD: 69 +...: &bat_priv->tt.last_changeset_lock ->pool_lock#2 FD: 81 BD: 1 +.+.: (wq_completion)wg-kex-wg1 ->(work_completion)(&peer->transmit_handshake_work) ->(work_completion)(&peer->clear_peer_work) FD: 1 BD: 104 +.-.: &r->producer_lock#2 FD: 112 BD: 1 +.+.: 
(wq_completion)wg-kex-wg0#2 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 111 BD: 37 +.+.: (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->&r->consumer_lock#2 ->&wg->static_identity.lock ->&peer->endpoint_lock ->tk_core.seq.seqcount ->&cookie->lock ->&handshake->lock ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->&rq->__lock ->&list->lock#17 ->&c->lock ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount ->rcu_node_0 ->&rcu_state.expedited_wq ->&meta->lock ->kfence_freelist_lock ->quarantine_lock FD: 1 BD: 102 +.+.: &r->consumer_lock#2 FD: 5 BD: 124 +.-.: &peer->keypairs.keypair_update_lock ->&table->lock#2 ->&obj_hash[i].lock ->pool_lock#2 FD: 112 BD: 1 +.+.: (wq_completion)wg-kex-wg1#2 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 113 BD: 1 +.+.: (wq_completion)wg-crypt-wg1 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 34 BD: 19 +.+.: (work_completion)(&peer->transmit_packet_work) ->&rq->__lock ->&obj_hash[i].lock ->&base->lock ->&peer->endpoint_lock ->batched_entropy_u8.lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 113 BD: 1 +.+.: (wq_completion)wg-crypt-wg0 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 1 BD: 1 +.-.: &keypair->receiving_counter.lock FD: 89 BD: 1 +.-.: (&ndev->rs_timer) ->&ndev->lock ->pool_lock#2 ->&dir->lock#2 ->&ul->lock#2 ->&c->lock ->&____s->seqcount ->&____s->seqcount#2 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&n->list_lock ->init_task.mems_allowed_seq.seqcount ->key#28 FD: 80 BD: 1 +.+.: (wq_completion)wg-kex-wg0#3 ->(work_completion)(&peer->transmit_handshake_work) FD: 81 BD: 1 +.+.: (wq_completion)wg-kex-wg2 ->(work_completion)(&peer->transmit_handshake_work) ->(work_completion)(&peer->clear_peer_work) FD: 112 BD: 1 +.+.: (wq_completion)wg-kex-wg2#2 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 113 BD: 1 
+.+.: (wq_completion)wg-crypt-wg2 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 78 BD: 10 +.+.: &data->mtx ->fs_reclaim ->pool_lock#2 ->&rfkill->lock FD: 80 BD: 1 +.+.: (wq_completion)wg-kex-wg1#3 ->(work_completion)(&peer->transmit_handshake_work) FD: 112 BD: 1 +.+.: (wq_completion)wg-kex-wg0#4 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 112 BD: 1 +.+.: (wq_completion)wg-kex-wg1#4 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->&rq->__lock FD: 113 BD: 1 +.+.: (wq_completion)wg-crypt-wg1#2 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 113 BD: 1 +.+.: (wq_completion)wg-crypt-wg0#2 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) ->&rq->__lock FD: 1 BD: 68 ....: &wdev->event_lock FD: 1 BD: 67 +.+.: (work_completion)(&(&sdata->dec_tailroom_needed_wk)->work) FD: 33 BD: 69 +.+.: &local->key_mtx ->&obj_hash[i].lock FD: 1 BD: 70 ..-.: &rdev->wiphy_work_lock FD: 1 BD: 67 ....: (&dwork->timer) FD: 1 BD: 67 +.+.: (work_completion)(&(&link->color_collision_detect_work)->work) FD: 21 BD: 1 +.+.: (wq_completion)phy3 ->(work_completion)(&local->reconfig_filter) FD: 80 BD: 1 +.+.: (wq_completion)wg-kex-wg2#3 ->(work_completion)(&peer->transmit_handshake_work) FD: 112 BD: 1 +.+.: (wq_completion)wg-kex-wg2#4 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 113 BD: 1 +.+.: (wq_completion)wg-crypt-wg2#2 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 292 BD: 2 +.+.: (work_completion)(&rdev->wiphy_work) ->&rdev->wiphy.mtx ->&lock->wait_lock 
->&p->pi_lock ->&rq->__lock FD: 1 BD: 69 ..-.: &list->lock#18 FD: 1 BD: 68 +.-.: &ifibss->incomplete_lock FD: 84 BD: 72 +.+.: &local->mtx ->&rq->__lock ->&local->chanctx_mtx ->fs_reclaim ->&c->lock ->pool_lock#2 ->&lock->wait_lock ->&obj_hash[i].lock ->&base->lock ->nl_table_lock ->nl_table_wait.lock ->&____s->seqcount#2 ->&n->list_lock ->&data->mutex ->remove_cache_srcu ->&local->queue_stop_reason_lock FD: 747 BD: 1 +.+.: (wq_completion)cfg80211 ->(work_completion)(&rdev->event_work) ->(work_completion)(&(&rdev->dfs_update_channels_wk)->work) FD: 292 BD: 2 +.+.: (work_completion)(&rdev->event_work) ->&rdev->wiphy.mtx FD: 98 BD: 2 +.+.: wireless_nlevent_work ->net_rwsem FD: 21 BD: 1 +.+.: (wq_completion)phy4 ->(work_completion)(&local->reconfig_filter) FD: 21 BD: 1 +.+.: (wq_completion)phy5 ->(work_completion)(&local->reconfig_filter) FD: 88 BD: 1 +.+.: (wq_completion)phy6 ->(work_completion)(&local->reconfig_filter) ->(work_completion)(&(&local->roc_work)->work) FD: 1 BD: 3922 ..-.: &list->lock#19 FD: 33 BD: 1 +.-.: &local->rx_path_lock ->&list->lock#18 ->&rdev->wiphy_work_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 19 BD: 70 +...: &sta->lock ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock ->&____s->seqcount#2 FD: 19 BD: 68 +.-.: &sta->rate_ctrl_lock ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock ->&c->lock FD: 131 BD: 68 +.+.: &local->sta_mtx ->fs_reclaim ->pool_lock#2 ->&local->chanctx_mtx ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->&c->lock ->&n->list_lock ->&obj_hash[i].lock ->quarantine_lock ->nl_table_lock ->nl_table_wait.lock ->&____s->seqcount#2 ->&____s->seqcount ->&rq->__lock ->&sta->ampdu_mlme.mtx ->(work_completion)(&sta->ampdu_mlme.work) ->rcu_node_0 ->&sta->lock ->krc.lock ->&local->key_mtx ->&fq->lock ->&dentry->d_lock ->&fsnotify_mark_srcu ->&sb->s_type->i_lock_key#7 ->&s->s_inode_list_lock ->&xa->xa_lock#7 ->mount_lock ->&local->active_txq_lock[i] ->(work_completion)(&sta->drv_deliver_wk) ->&rnp->exp_lock ->rcu_state.exp_mutex ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock FD: 120 BD: 1 +.+.: &type->s_umount_key#46/1 ->fs_reclaim ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#32 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->binderfs_minors_mutex ->&dentry->d_lock ->&c->lock ->&____s->seqcount ->&sb->s_type->i_mutex_key#20 ->&____s->seqcount#2 FD: 41 BD: 3 +.+.: &sb->s_type->i_lock_key#32 ->&dentry->d_lock FD: 2 BD: 2 +.+.: binderfs_minors_mutex ->binderfs_minors.xa_lock FD: 1 BD: 3 ....: binderfs_minors.xa_lock FD: 93 BD: 2 +.+.: &sb->s_type->i_mutex_key#20 ->&sb->s_type->i_lock_key#32 ->rename_lock.seqcount ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->&c->lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&____s->seqcount#2 ->&____s->seqcount FD: 1 BD: 3 +.+.: iunique_lock FD: 723 BD: 2 +.+.: &type->i_mutex_dir_key#6/1 ->rename_lock.seqcount ->fs_reclaim ->&dentry->d_lock ->&root->kernfs_rwsem ->tomoyo_ss ->&root->kernfs_iattr_rwsem ->cgroup_mutex ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount FD: 1 BD: 16 ....: task_group_lock FD: 80 BD: 1 .+.+: kn->active#53 ->fs_reclaim ->&c->lock ->&____s->seqcount ->&kernfs_locks->open_file_mutex[count] ->&____s->seqcount#2 ->&n->list_lock FD: 80 BD: 1 ++++: kn->active#54 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&____s->seqcount#2 ->&n->list_lock FD: 3 BD: 110 ..-.: cgroup_threadgroup_rwsem.rss.gp_wait.lock ->&obj_hash[i].lock FD: 1 BD: 108 
....: cgroup_threadgroup_rwsem.waiters.lock FD: 1 BD: 16 +.+.: (wq_completion)cpuset_migrate_mm FD: 723 BD: 2 +.+.: &type->i_mutex_dir_key#7/1 ->rename_lock.seqcount ->fs_reclaim ->&dentry->d_lock ->&root->kernfs_rwsem ->tomoyo_ss ->&root->kernfs_iattr_rwsem ->cgroup_mutex ->pool_lock#2 ->&xa->xa_lock#3 ->&obj_hash[i].lock ->stock_lock ->&c->lock ->cgroup_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->&sb->s_type->i_lock_key#31 FD: 80 BD: 1 ++++: kn->active#55 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] ->stock_lock ->&____s->seqcount#2 ->&____s->seqcount FD: 1 BD: 114 ....: cpuset_attach_wq.lock FD: 2 BD: 4490 ..-.: stock_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 80 BD: 1 .+.+: kn->active#56 ->fs_reclaim ->stock_lock ->pool_lock#2 ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&____s->seqcount FD: 81 BD: 1 .+.+: kn->active#57 ->fs_reclaim ->stock_lock ->&kernfs_locks->open_file_mutex[count] ->memcg_max_mutex ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount FD: 1 BD: 8 +.+.: memcg_max_mutex FD: 1 BD: 6 ....: &per_cpu(xt_recseq, i) FD: 268 BD: 1 +.+.: nf_nat_proto_mutex ->fs_reclaim ->pool_lock#2 ->nf_hook_mutex ->cpu_hotplug_lock ->&obj_hash[i].lock ->stock_lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount FD: 1 BD: 85 ....: elock-AF_INET6 FD: 30 BD: 1 +.+.: loop_validate_mutex ->&lo->lo_mutex ->&rq->__lock ->loop_validate_mutex.wait_lock FD: 1 BD: 3 +...: rds_sock_lock FD: 1 BD: 3 +...: clock-AF_RDS FD: 1 BD: 3 ....: &rs->rs_recv_lock FD: 1 BD: 3 ....: rds_cong_monitor_lock FD: 1 BD: 10 ....: rds_cong_lock FD: 1 BD: 3 ....: &rs->rs_lock FD: 1 BD: 3 ....: &rs->rs_rdma_lock FD: 1 BD: 3 ....: &q->lock FD: 1 BD: 3 +.+.: &net->ipv4.ra_mutex FD: 1 BD: 3920 +.-.: &local->active_txq_lock[i] FD: 38 BD: 3917 +.-.: &local->handle_wake_tx_queue_lock ->&local->active_txq_lock[i] ->&local->queue_stop_reason_lock ->&fq->lock ->tk_core.seq.seqcount ->hwsim_radio_lock ->&list->lock#19 FD: 1 BD: 3926 ..-.: &local->queue_stop_reason_lock FD: 35 BD: 1 +.-.: (&peer->timer_persistent_keepalive) ->pool_lock#2 ->&c->lock ->&n->list_lock ->&list->lock#17 ->tk_core.seq.seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock ->&____s->seqcount#2 ->&____s->seqcount ->init_task.mems_allowed_seq.seqcount FD: 1 BD: 1 ....: _rs.lock FD: 217 BD: 3 +.+.: sk_lock-AF_CAN ->&rq->__lock ->slock-AF_CAN ->clock-AF_CAN ->&obj_hash[i].lock ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->&mm->mmap_lock ->proc_subdir_lock ->fs_reclaim ->pool_lock#2 ->proc_inum_ida.xa_lock ->&____s->seqcount ->&c->lock ->tk_core.seq.seqcount ->&list->lock#5 ->hrtimer_bases.lock ->&ent->pde_unload_lock ->pcpu_lock FD: 1 BD: 4 +...: slock-AF_CAN FD: 1 BD: 4 ++..: clock-AF_CAN FD: 1 BD: 3 ..-.: rlock-AF_CAN FD: 1 BD: 3 ..-.: elock-AF_CAN FD: 80 BD: 1 +.+.: (wq_completion)wg-kex-wg0#5 ->(work_completion)(&peer->transmit_handshake_work) FD: 1 BD: 3946 +.-.: &nf_nat_locks[i] FD: 80 BD: 1 +.+.: (wq_completion)wg-kex-wg0#6 ->(work_completion)(&peer->transmit_handshake_work) FD: 80 BD: 1 +.+.: (wq_completion)wg-kex-wg1#5 ->(work_completion)(&peer->transmit_handshake_work) FD: 112 BD: 1 +.+.: (wq_completion)wg-kex-wg0#7 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 112 BD: 1 +.+.: (wq_completion)wg-kex-wg1#6 ->&rq->__lock ->(work_completion)(&({ 
do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 113 BD: 1 +.+.: (wq_completion)wg-crypt-wg1#3 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 113 BD: 1 +.+.: (wq_completion)wg-crypt-wg0#3 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) ->&rq->__lock FD: 80 BD: 1 +.+.: (wq_completion)wg-kex-wg2#5 ->(work_completion)(&peer->transmit_handshake_work) FD: 112 BD: 1 +.+.: (wq_completion)wg-kex-wg2#6 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 113 BD: 1 +.+.: (wq_completion)wg-crypt-wg2#3 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 80 BD: 1 +.+.: (wq_completion)wg-kex-wg1#7 ->(work_completion)(&peer->transmit_handshake_work) FD: 112 BD: 1 +.+.: (wq_completion)wg-kex-wg2#7 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 80 BD: 1 +.+.: (wq_completion)wg-kex-wg2#8 ->(work_completion)(&peer->transmit_handshake_work) FD: 112 BD: 1 +.+.: (wq_completion)wg-kex-wg1#8 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 112 BD: 1 +.+.: (wq_completion)wg-kex-wg0#8 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 113 BD: 1 +.+.: (wq_completion)wg-crypt-wg1#4 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) 
*)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 113 BD: 1 +.+.: (wq_completion)wg-crypt-wg2#4 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 113 BD: 1 +.+.: (wq_completion)wg-crypt-wg0#4 ->&rq->__lock ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 81 BD: 1 +.+.: (wq_completion)wg-kex-wg0#9 ->(work_completion)(&peer->transmit_handshake_work) ->(work_completion)(&peer->clear_peer_work) ->&rq->__lock FD: 21 BD: 1 +.+.: (wq_completion)phy7 ->(work_completion)(&local->reconfig_filter) FD: 81 BD: 1 +.+.: (wq_completion)wg-kex-wg1#9 ->(work_completion)(&peer->transmit_handshake_work) ->(work_completion)(&peer->clear_peer_work) FD: 112 BD: 1 +.+.: (wq_completion)wg-kex-wg0#10 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 112 BD: 1 +.+.: (wq_completion)wg-kex-wg1#10 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 113 BD: 1 +.+.: (wq_completion)wg-crypt-wg1#5 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 113 BD: 1 +.+.: (wq_completion)wg-crypt-wg0#5 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) ->&rq->__lock FD: 81 BD: 1 +.+.: (wq_completion)wg-kex-wg2#9 ->(work_completion)(&peer->transmit_handshake_work) ->(work_completion)(&peer->clear_peer_work) FD: 112 BD: 1 +.+.: (wq_completion)wg-kex-wg2#10 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 113 BD: 1 +.+.: (wq_completion)wg-crypt-wg2#5 ->(work_completion)(&({ do { 
const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 21 BD: 1 +.+.: (wq_completion)phy9 ->(work_completion)(&local->reconfig_filter) FD: 80 BD: 1 +.+.: (wq_completion)wg-kex-wg0#11 ->(work_completion)(&peer->transmit_handshake_work) FD: 21 BD: 1 +.+.: (wq_completion)phy8 ->(work_completion)(&local->reconfig_filter) FD: 80 BD: 1 +.+.: (wq_completion)wg-kex-wg1#11 ->(work_completion)(&peer->transmit_handshake_work) FD: 112 BD: 1 +.+.: (wq_completion)wg-kex-wg0#12 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 112 BD: 1 +.+.: (wq_completion)wg-kex-wg1#12 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 113 BD: 1 +.+.: (wq_completion)wg-crypt-wg1#6 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 80 BD: 1 +.+.: (wq_completion)wg-kex-wg2#11 ->(work_completion)(&peer->transmit_handshake_work) FD: 112 BD: 1 +.+.: (wq_completion)wg-kex-wg2#12 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 113 BD: 1 +.+.: (wq_completion)wg-crypt-wg2#6 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 113 BD: 1 +.+.: (wq_completion)wg-crypt-wg0#6 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 21 BD: 1 +.+.: (wq_completion)phy10 ->(work_completion)(&local->reconfig_filter) FD: 88 BD: 1 +.+.: (wq_completion)phy11 ->(work_completion)(&local->reconfig_filter) ->(work_completion)(&(&local->roc_work)->work) FD: 21 BD: 1 +.+.: (wq_completion)phy12 ->(work_completion)(&local->reconfig_filter) FD: 21 BD: 1 +.+.: (wq_completion)phy13 ->(work_completion)(&local->reconfig_filter) FD: 1 BD: 79 +...: 
l2tp_ip6_lock FD: 88 BD: 1 +.+.: (wq_completion)phy14 ->(work_completion)(&local->reconfig_filter) ->(work_completion)(&(&local->roc_work)->work) FD: 338 BD: 1 +.+.: sk_lock-AF_RXRPC ->slock-AF_RXRPC ->&rxnet->local_mutex FD: 1 BD: 2 +...: slock-AF_RXRPC FD: 16 BD: 73 +...: &dccp_hashinfo.bhash[i].lock ->&dccp_hashinfo.bhash2[i].lock ->stock_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 15 BD: 74 +...: &dccp_hashinfo.bhash2[i].lock ->stock_lock ->&____s->seqcount#2 ->&____s->seqcount ->pool_lock#2 ->&c->lock ->clock-AF_INET6 ->&obj_hash[i].lock ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 1 BD: 3 +...: clock-AF_RXRPC FD: 1 BD: 7 ..-.: rlock-AF_RXRPC FD: 1 BD: 1 ....: (&local->client_conn_reap_timer) FD: 1 BD: 1 ....: &list->lock#20 FD: 34 BD: 3 +.+.: sk_lock-AF_ROSE ->slock-AF_ROSE ->rose_node_list_lock ->rose_list_lock ->&obj_hash[i].lock ->wlock-AF_ROSE ->&list->lock#21 ->rlock-AF_ROSE ->&rq->__lock FD: 1 BD: 4 +...: slock-AF_ROSE FD: 1 BD: 4 +...: rose_node_list_lock FD: 5 BD: 67 +...: &nr_netdev_xmit_lock_key ->nr_node_list_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 269 BD: 65 +.+.: __ip_vs_mutex ->&ipvs->dest_trash_lock ->ip_vs_sched_mutex ->nf_hook_mutex ->cpu_hotplug_lock ->&obj_hash[i].lock ->pool_lock#2 ->fs_reclaim ->pcpu_alloc_mutex ->&c->lock ->&rq->__lock ->ipvs->est_mutex ->(console_sem).lock FD: 1 BD: 66 +...: &ipvs->dest_trash_lock FD: 28 BD: 65 +.+.: flowtable_lock ->&rq->__lock FD: 1 BD: 66 +...: nr_list_lock FD: 1 BD: 65 +...: nr_neigh_list_lock FD: 1 BD: 3 +...: clock-AF_ROSE FD: 1 BD: 4 ....: wlock-AF_ROSE FD: 1 BD: 4 ....: &list->lock#21 FD: 1 BD: 4 +...: rose_list_lock FD: 1 BD: 4 ....: rlock-AF_ROSE FD: 156 BD: 100 .+.+: sb_pagefaults ->tk_core.seq.seqcount ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&journal->j_state_lock ->jbd2_handle ->&obj_hash[i].lock ->mapping.invalidate_lock ->&rq->__lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->remove_cache_srcu ->quarantine_lock ->&journal->j_wait_transaction_locked ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 117 BD: 69 +.+.: k-sk_lock-AF_INET6/1 ->k-slock-AF_INET6 ->pool_lock#2 ->&dir->lock ->fs_reclaim ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&hashinfo->ehash_locks[i] ->&tcp_hashinfo.bhash[i].lock ->k-clock-AF_INET6 ->rlock-AF_INET6 ->&list->lock#24 ->&n->list_lock FD: 1 BD: 78 +...: &token_hash[i].lock FD: 1 BD: 1 ....: _rs.lock#2 FD: 1 BD: 75 .+.-: &table->lock#3 FD: 1 BD: 1 ....: &tfile->socket.wq.wait FD: 12 BD: 221 +...: link_idr_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount#2 ->&n->list_lock ->&____s->seqcount FD: 269 BD: 1 +.+.: tracepoints_mutex ->fs_reclaim ->pool_lock#2 ->cpu_hotplug_lock ->tracepoint_srcu_srcu_usage.lock ->&rq->__lock ->&ACCESS_PRIVATE(sdp, lock) ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->tracepoint_srcu ->&x->wait#3 ->tracepoints_mutex.wait_lock ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->&rnp->exp_lock ->rcu_state.exp_mutex ->&____s->seqcount#2 ->batched_entropy_u8.lock ->kfence_freelist_lock ->remove_cache_srcu FD: 746 BD: 1 +.+.: ppp_mutex ->&mm->mmap_lock ->fs_reclaim ->stock_lock ->&c->lock ->&n->list_lock ->pool_lock#2 ->stack_depot_init_mutex ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&pn->all_ppp_mutex ->ppp_mutex.wait_lock ->&rq->__lock ->&____s->seqcount#2 ->&____s->seqcount ->free_vmap_area_lock ->vmap_area_lock ->pcpu_alloc_mutex ->&obj_hash[i].lock ->purge_vmap_area_lock FD: 77 BD: 64 +.+.: &pn->all_ppp_mutex ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock FD: 1 BD: 1 ....: 
_rs.lock#3 FD: 1 BD: 65 +...: &ppp->rlock FD: 2 BD: 64 +...: &ppp->wlock ->&ppp->rlock FD: 214 BD: 1 +.+.: sk_lock-AF_ALG ->slock-AF_ALG ->fs_reclaim ->pool_lock#2 ->&mm->mmap_lock ->&obj_hash[i].lock ->&c->lock ->&dir->lock ->&____s->seqcount ->&ei->socket.wq.wait ->&rq->__lock ->&n->list_lock ->sk_lock-AF_ALG/1 ->(console_sem).lock ->remove_cache_srcu ->&____s->seqcount#2 ->&cfs_rq->removed.lock FD: 30 BD: 3 +...: slock-AF_ALG ->&sk->sk_lock.wq FD: 28 BD: 65 +.+.: &net->xdp.lock ->&rq->__lock FD: 1 BD: 65 +.+.: mirred_list_lock FD: 12 BD: 65 +...: &idev->mc_query_lock ->&obj_hash[i].lock FD: 28 BD: 65 +.+.: (work_completion)(&(&idev->mc_report_work)->work) ->&rq->__lock FD: 1 BD: 65 +...: &idev->mc_report_lock FD: 29 BD: 65 +.+.: &pnn->pndevs.lock ->&rq->__lock FD: 28 BD: 65 +.+.: &pnn->routes.lock ->&rq->__lock FD: 1 BD: 64 +...: &dev_addr_list_lock_key#4 FD: 1 BD: 64 ....: &pf->rwait FD: 1 BD: 20 ....: netdev_unregistering_wq.lock FD: 1 BD: 1 ....: &list->lock#22 FD: 1 BD: 1 ....: &ep->poll_wait FD: 29 BD: 4173 ....: &ep->poll_wait/1 ->&p->pi_lock FD: 15 BD: 6 ....: tracepoint_srcu_srcu_usage.lock ->&obj_hash[i].lock ->&ACCESS_PRIVATE(sdp, lock) ->&base->lock FD: 1 BD: 1 +.+.: &mq_lock FD: 104 BD: 2 +.+.: free_ipc_work ->rcu_node_0 ->&obj_hash[i].lock ->&pool->lock ->&rnp->exp_wq[1] ->&rq->__lock ->mount_lock ->&fsnotify_mark_srcu ->&type->s_umount_key#47 ->unnamed_dev_ida.xa_lock ->list_lrus_mutex ->&xa->xa_lock#3 ->pool_lock#2 ->sb_lock ->mnt_id_ida.xa_lock ->&ids->rwsem ->(work_completion)(&ht->run_work) ->&ht->mutex ->percpu_counters_lock ->pcpu_lock ->sysctl_lock ->proc_inum_ida.xa_lock ->stock_lock ->&rnp->exp_lock ->rcu_state.exp_mutex ->&rnp->exp_wq[2] ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->&rcu_state.expedited_wq FD: 103 BD: 3 +.+.: &type->s_umount_key#47 ->shrinker_rwsem ->rename_lock.seqcount ->&dentry->d_lock ->&sb->s_type->i_lock_key#20 ->&s->s_inode_list_lock ->&xa->xa_lock#7 ->&obj_hash[i].lock ->pool_lock#2 ->&fsnotify_mark_srcu ->sb_lock FD: 1 BD: 3 +.+.: &ids->rwsem FD: 883 BD: 1 +.+.: (wq_completion)netns ->net_cleanup_work FD: 882 BD: 2 +.+.: net_cleanup_work ->pernet_ops_rwsem ->rcu_state.barrier_mutex ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock ->&dir->lock ->stock_lock ->rcu_state.barrier_mutex.wait_lock ->&p->pi_lock FD: 1 BD: 5 +...: &net->nsid_lock FD: 1 BD: 5 +...: &tn->node_list_lock FD: 1 BD: 5 +.+.: netns_bpf_mutex FD: 1 BD: 5 ....: (&net->fs_probe_timer) FD: 1 BD: 7 ++++: &net->cells_lock FD: 1 BD: 5 ....: (&net->cells_timer) FD: 34 BD: 1 +.+.: (wq_completion)afs ->(work_completion)(&net->cells_manager) ->(work_completion)(&net->fs_manager) FD: 31 BD: 2 +.+.: (work_completion)(&net->cells_manager) ->&net->cells_lock ->bit_wait_table + i ->&rq->__lock FD: 1 BD: 5 ....: (&net->fs_timer) FD: 31 BD: 2 +.+.: (work_completion)(&net->fs_manager) ->&(&net->fs_lock)->lock ->bit_wait_table + i ->&rq->__lock FD: 1 BD: 3 +.+.: &(&net->fs_lock)->lock FD: 1 BD: 6 +.+.: &rx->incoming_lock FD: 1 BD: 6 +.+.: &call->notify_lock FD: 1 BD: 6 ....: (rxrpc_call_limiter).lock FD: 1 BD: 6 +.+.: &rx->recvmsg_lock FD: 1 BD: 6 ....: (&call->timer) FD: 1 BD: 6 ....: &list->lock#23 FD: 1 BD: 5 +.+.: (wq_completion)kafsd FD: 1 BD: 5 +...: k-clock-AF_RXRPC FD: 1 BD: 8 +.+.: (work_completion)(&data->gc_work) FD: 1 BD: 5 +.+.: (work_completion)(&ovs_net->dp_notify_work) FD: 1 BD: 5 +...: &srv->idr_lock FD: 1 BD: 71 +...: &msk->pm.lock FD: 31 BD: 1 +.-.: (&sdp->delay_work) FD: 1 BD: 4 +...: clock-AF_KCM FD: 1 BD: 7 +...: &nt->cluster_scope_lock FD: 1 BD: 5 +.+.: 
(work_completion)(&tn->work) FD: 1 BD: 5 +.+.: (work_completion)(&(&c->work)->work) FD: 1 BD: 3 +.+.: (work_completion)(&kcm->tx_work) FD: 164 BD: 5 +.+.: (wq_completion)krdsd ->(work_completion)(&rtn->rds_tcp_accept_w) ->(work_completion)(&(&cp->cp_send_w)->work) ->(work_completion)(&(&cp->cp_recv_w)->work) ->(work_completion)(&cp->cp_down_w) FD: 158 BD: 6 +.+.: (work_completion)(&rtn->rds_tcp_accept_w) ->fs_reclaim ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#8 ->k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->&obj_hash[i].lock ->once_lock ->&____s->seqcount#2 ->&____s->seqcount ->&rq->__lock ->&c->lock ->&n->list_lock ->rds_cong_lock ->rds_trans_sem ->&tc->t_conn_path_lock ->&xa->xa_lock#7 ->&fsnotify_mark_srcu FD: 28 BD: 3 +.+.: &knet->mutex ->&rq->__lock FD: 1 BD: 7 ....: rds_tcp_conn_lock FD: 1 BD: 5 ....: loop_conns_lock FD: 1 BD: 5 +.+.: (wq_completion)l2tp FD: 296 BD: 4 +.+.: (work_completion)(&msk->work) ->sk_lock-AF_INET ->slock-AF_INET ->&rq->__lock FD: 1 BD: 4 +...: &mux->lock FD: 2 BD: 3 +...: &mux->rx_lock ->rlock-AF_KCM FD: 208 BD: 3 +.+.: sk_lock-AF_KCM ->slock-AF_KCM ->fs_reclaim ->pool_lock#2 ->&____s->seqcount ->&mm->mmap_lock ->&mux->lock ->clock-AF_KCM ->&obj_hash[i].lock ->&c->lock FD: 1 BD: 4 +...: slock-AF_KCM FD: 1 BD: 5 ....: (&rxnet->service_conn_reap_timer) FD: 2 BD: 8 +.+.: (work_completion)(&rxnet->service_conn_reaper) ->&rxnet->conn_lock FD: 1 BD: 4 ....: rlock-AF_KCM FD: 1 BD: 2 +.+.: ppp_mutex.wait_lock FD: 1 BD: 68 +.+.: rcu_state.barrier_mutex.wait_lock FD: 1 BD: 5 +.+.: &hn->hn_lock FD: 39 BD: 64 +.+.: &caifn->caifdevs.lock ->&obj_hash[i].lock ->&rq->__lock ->pool_lock#2 ->&this->info_list_lock ->&rnp->exp_wq[2] ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->&rnp->exp_lock FD: 84 BD: 1 +.+.: (wq_completion)inet_frag_wq ->(work_completion)(&fqdir->destroy_work) FD: 83 BD: 2 +.+.: (work_completion)(&fqdir->destroy_work) ->(work_completion)(&ht->run_work) ->&ht->mutex FD: 45 BD: 2 +.+.: fqdir_free_work ->rcu_state.barrier_mutex ->rcu_state.barrier_mutex.wait_lock ->&p->pi_lock ->&obj_hash[i].lock ->pool_lock#2 ->quarantine_lock FD: 1 BD: 66 +...: &this->info_list_lock FD: 1 BD: 5 +.+.: &pnetids_ndev->lock FD: 1 BD: 72 +.-.: rlock-AF_INET6 FD: 1 BD: 72 ....: &list->lock#24 FD: 41 BD: 68 +.-.: k-slock-AF_INET6/1 ->&sctp_ep_hashtable[i].lock ->&obj_hash[i].lock ->pool_lock#2 ->k-clock-AF_INET6 ->tk_core.seq.seqcount ->clock-AF_INET6 ->&hashinfo->ehash_locks[i] ->&tcp_hashinfo.bhash[i].lock ->&base->lock FD: 1 BD: 71 ++.-: &sctp_ep_hashtable[i].lock FD: 1 BD: 64 ....: wlock-AF_UNSPEC FD: 1 BD: 64 ....: elock-AF_UNSPEC FD: 1 BD: 122 +.+.: &pa->pa_lock#2 FD: 340 BD: 1 +.+.: sock_diag_mutex ->sock_diag_table_mutex ->fs_reclaim ->&c->lock ->pool_lock#2 ->&rq->__lock ->rlock-AF_NETLINK FD: 332 BD: 3 +.+.: nlk_cb_mutex-SOCK_DIAG ->fs_reclaim ->pool_lock#2 ->&c->lock ->inet_diag_table_mutex ->vsock_table_lock ->&rq->__lock ->remove_cache_srcu ->&obj_hash[i].lock ->rlock-AF_NETLINK FD: 1 BD: 5 +.+.: &fn->fou_lock FD: 1 BD: 5 +.+.: ipvs->sync_mutex FD: 752 BD: 11 ++++: rdma_nets_rwsem ->rdma_nets.xa_lock ->&rq->__lock ->&device->compat_devs_mutex ->rdma_nets_rwsem.wait_lock ->&lock->wait_lock ->&p->pi_lock FD: 1 BD: 5 +...: k-clock-AF_NETLINK FD: 12 BD: 2 +.+.: (work_completion)(&nlk->work) ->&obj_hash[i].lock ->pool_lock#2 ->vmap_area_lock ->purge_vmap_area_lock ->rlock-AF_NETLINK ->&dir->lock FD: 33 BD: 1 ..-.: &(&bat_priv->dat.work)->timer FD: 33 BD: 1 ..-.: &(&bat_priv->bla.work)->timer FD: 32 BD: 2 +.+.: 
(work_completion)(&(&bat_priv->dat.work)->work) ->&hash->list_locks[i] ->&rq->__lock ->&obj_hash[i].lock ->&base->lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock ->pool_lock#2 FD: 1 BD: 3 +...: &hash->list_locks[i] FD: 35 BD: 2 +.+.: (work_completion)(&(&bat_priv->bla.work)->work) ->key#20 ->&obj_hash[i].lock ->rcu_node_0 ->&rq->__lock ->&base->lock ->crngs.lock ->&cfs_rq->removed.lock FD: 1 BD: 5 +.+.: &sn->gssp_lock FD: 1 BD: 8 +.+.: &cd->hash_lock FD: 1 BD: 5 +.+.: xfrm_state_gc_work FD: 1 BD: 65 +...: &net->xfrm.xfrm_state_lock FD: 1 BD: 5 +...: ip6_fl_lock FD: 1 BD: 5 ....: (&net->ipv6.ip6_fib_timer) FD: 1 BD: 64 ....: (&mrt->ipmr_expire_timer) FD: 1 BD: 5 ....: (&ipvs->dest_trash_timer) FD: 1 BD: 5 +.+.: (work_completion)(&(&ipvs->expire_nodest_conn_work)->work) FD: 92 BD: 6 +.+.: (work_completion)(&(&ipvs->est_reload_work)->work) ->ipvs->est_mutex FD: 1 BD: 5 +...: recent_lock FD: 83 BD: 5 +.+.: hashlimit_mutex ->&rq->__lock ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->init_mm.page_table_lock ->proc_subdir_lock ->proc_inum_ida.xa_lock ->&obj_hash[i].lock ->&base->lock ->&ent->pde_unload_lock ->&c->lock FD: 1 BD: 5 +.+.: (work_completion)(&(&cnet->ecache.dwork)->work) FD: 1 BD: 5 +.+.: (work_completion)(&net->xfrm.policy_hash_work) FD: 50 BD: 65 +...: &net->xfrm.xfrm_policy_lock ->&c->lock ->pool_lock#2 ->&obj_hash[i].lock ->&base->lock ->&n->list_lock ->&____s->seqcount#13 ->krc.lock FD: 1 BD: 5 +.+.: (work_completion)(&net->xfrm.state_hash_work) FD: 79 BD: 65 +.+.: &block->lock ->fs_reclaim ->&c->lock ->pool_lock#2 ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock FD: 78 BD: 64 +.+.: &block->cb_lock ->flow_indr_block_lock FD: 77 BD: 65 +.+.: flow_indr_block_lock ->fs_reclaim ->&c->lock ->pool_lock#2 FD: 29 BD: 91 ....: &sk->sk_lock.wq ->&p->pi_lock FD: 62 BD: 1 +.-.: (&peer->timer_retransmit_handshake) ->&peer->endpoint_lock ->&obj_hash[i].lock ->&list->lock#17 FD: 127 BD: 3 +.+.: sk_lock-AF_INET/1 ->slock-AF_INET ->rlock-AF_INET ->&list->lock#24 ->&obj_hash[i].lock ->pool_lock#2 ->&c->lock ->&list->lock#25 ->fs_reclaim ->&n->list_lock ->&rq->__lock ->krc.lock ->&base->lock ->sctp_assocs_id_lock ->&____s->seqcount#2 ->&____s->seqcount FD: 1 BD: 4 ....: rlock-AF_INET FD: 4 BD: 2 +.-.: icmp_global.lock ->batched_entropy_u8.lock FD: 1 BD: 25 ....: namespace_sem.wait_lock FD: 33 BD: 1 +.-.: (&peer->timer_send_keepalive) ->pool_lock#2 ->&c->lock ->&list->lock#17 ->tk_core.seq.seqcount ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount FD: 1 BD: 4 ....: unix_gc_wait.lock FD: 11 BD: 77 +.-.: &sctp_port_hashtable[i].lock ->&____s->seqcount#2 ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock FD: 1 BD: 71 ....: &asoc->wait FD: 1 BD: 82 ..-.: key#23 FD: 31 BD: 1 ..-.: &(&net->ipv6.addr_chk_work)->timer FD: 49 BD: 1 +.-.: (&p->forward_delay_timer) ->&br->lock FD: 72 BD: 1 .+.+: sb_writers#13 ->mount_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#27 ->&wb->list_lock FD: 11 BD: 225 +.-.: sctp_assocs_id_lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock FD: 1 BD: 76 ..-.: &list->lock#25 FD: 117 BD: 3 +.+.: sk_lock-AF_INET6/1 ->slock-AF_INET6 ->rlock-AF_INET6 ->&obj_hash[i].lock ->&____s->seqcount ->pool_lock#2 ->&list->lock#24 ->&c->lock ->&rq->__lock ->&list->lock#25 ->fs_reclaim ->tk_core.seq.seqcount ->quarantine_lock ->krc.lock ->&base->lock ->rcu_node_0 ->sctp_assocs_id_lock ->&____s->seqcount#2 ->&n->list_lock FD: 53 BD: 68 +.-.: slock-AF_INET6/1 
->&sctp_ep_hashtable[i].lock ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock ->&sctp_port_hashtable[i].lock ->clock-AF_INET6 ->tk_core.seq.seqcount ->&base->lock ->&____s->seqcount ->&c->lock ->&n->list_lock FD: 1 BD: 2 ....: tracepoint_srcu FD: 3 BD: 3 +.+.: unix_gc_lock ->rlock-AF_UNIX ->unix_gc_wait.lock FD: 29 BD: 11 ....: &bdi->wb_waitq ->&p->pi_lock FD: 1 BD: 69 ....: key#22 FD: 19 BD: 3888 +...: _xmit_ETHER/2 ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock FD: 31 BD: 2 +.+.: sk_lock-AF_ALG/1 ->slock-AF_ALG FD: 81 BD: 64 +.+.: &chain->filter_chain_lock ->&block->lock ->&block->proto_destroy_lock FD: 1 BD: 65 +.+.: &block->proto_destroy_lock FD: 1 BD: 129 +.+.: text_mutex.wait_lock FD: 1 BD: 64 +.+.: bcm_notifier_lock FD: 1 BD: 3 +...: l2tp_ip_lock FD: 1 BD: 16 +.+.: cgroup_mutex.wait_lock FD: 84 BD: 3 +.+.: sk_lock-AF_NFC ->slock-AF_NFC ->&k->list_lock ->&k->k_lock ->llcp_devices_lock ->fs_reclaim ->pool_lock#2 ->&local->sdp_lock ->&local->sockets.lock ->&rq->__lock ->&c->lock FD: 1 BD: 4 +...: slock-AF_NFC FD: 1 BD: 4 +.+.: llcp_devices_lock FD: 29 BD: 4 +.+.: &local->sdp_lock ->&local->sockets.lock ->&rq->__lock FD: 1 BD: 5 ++++: &local->sockets.lock FD: 1 BD: 3 +...: clock-AF_NFC FD: 1 BD: 3 ....: rlock-AF_NFC FD: 1 BD: 3 ....: &list->lock#26 FD: 1 BD: 2 +.+.: tracepoints_mutex.wait_lock FD: 1 BD: 69 +...: &sw_ctx_tx->encrypt_compl_lock FD: 1 BD: 64 +...: mfc_unres_lock FD: 29 BD: 69 +.+.: tcpv6_prot_mutex ->rcu_node_0 ->&rq->__lock FD: 1 BD: 69 +...: device_spinlock FD: 9 BD: 18 +.+.: &pdata->netdev_lock ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock ->&dir->lock#2 FD: 1 BD: 64 +...: mfc_unres_lock#2 FD: 774 BD: 1 .+.+: &rdma_nl_types[idx].sem ->link_ops_rwsem FD: 1 BD: 3 +.+.: (work_completion)(&(&sw_ctx_tx->tx_work.work)->work) FD: 1 BD: 1 ....: _rs.lock#4 FD: 1 BD: 3 ....: ndev_hash_lock FD: 7 BD: 11 +.+.: devices.xa_lock ->&c->lock ->pool_lock#2 FD: 746 BD: 17 +.+.: &rxe->usdev_lock ->&pdata->netdev_lock ->rtnl_mutex ->(console_sem).lock ->&rq->__lock ->rtnl_mutex.wait_lock ->&p->pi_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&lock->wait_lock ->&pool->lock FD: 79 BD: 3946 +.+.: &table->lock#4 ->fs_reclaim ->&c->lock ->pool_lock#2 ->&n->list_lock ->&obj_hash[i].lock ->&table->rwlock ->&device->event_handler_rwsem ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 3947 ....: &table->rwlock FD: 31 BD: 3949 ++++: &device->event_handler_rwsem ->&rq->__lock FD: 30 BD: 13 +.+.: subsys mutex#83 ->&rq->__lock ->&k->k_lock FD: 181 BD: 12 ++++: &device->client_data_rwsem ->&xa->xa_lock#15 ->&rq->__lock ->fs_reclaim ->pool_lock#2 ->&xa->xa_lock#16 ->&xa->xa_lock#17 ->&c->lock ->crngs.lock ->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->init_mm.page_table_lock ->&obj_hash[i].lock ->purge_vmap_area_lock ->&x->wait#27 ->(console_sem).lock ->&x->wait#28 ->krc.lock ->lock ->&root->kernfs_rwsem ->&____s->seqcount#2 ->ib_mad_port_list_lock ->kernfs_idr_lock ->lock#7 ->umad_ida.xa_lock ->&x->wait#9 ->chrdevs_lock ->&k->list_lock ->gdp_mutex ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->uevent_sock_mutex ->uevent_sock_mutex.wait_lock ->subsys mutex#84 ->&n->list_lock ->rcu_node_0 ->pcpu_alloc_mutex ->uverbs_ida.xa_lock ->remove_cache_srcu ->subsys mutex#85 ->subsys mutex#86 ->rds_ib_devices_lock ->ib_nodev_conns_lock ->smc_ib_devices.mutex ->&device->event_handler_rwsem ->&rcu_state.expedited_wq ->&pnettable->lock FD: 749 BD: 1 +.+.: (wq_completion)infiniband ->(work_completion)(&work->work)#2 FD: 1 
BD: 5 ....: &device->cache_lock FD: 1 BD: 3 +.+.: rdmacg_mutex FD: 1 BD: 155 +.+.: gdp_mutex.wait_lock FD: 748 BD: 2 +.+.: (work_completion)(&work->work)#2 ->fs_reclaim ->pool_lock#2 ->&rxe->usdev_lock ->&device->cache_lock ->&obj_hash[i].lock ->&device->event_handler_rwsem ->&lock->wait_lock ->&p->pi_lock ->&rq->__lock ->&c->lock FD: 9 BD: 15 +.+.: &xa->xa_lock#15 ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock FD: 9 BD: 13 +.+.: &xa->xa_lock#16 ->&c->lock ->pool_lock#2 ->&obj_hash[i].lock FD: 2 BD: 67 +.+.: &xa->xa_lock#17 ->pool_lock#2 FD: 1 BD: 13 ....: &x->wait#27 FD: 1 BD: 1 ....: _rs.lock#5 FD: 1 BD: 13 ....: &x->wait#28 FD: 1 BD: 13 ....: ib_mad_port_list_lock FD: 1 BD: 67 +.+.: &id_priv->qp_mutex FD: 2 BD: 67 +.+.: &xa->xa_lock#18 ->pool_lock#2 FD: 2 BD: 67 ....: &cm_id_priv->lock ->&cm.lock FD: 1 BD: 68 ....: &cm.lock FD: 1 BD: 13 ....: umad_ida.xa_lock FD: 3 BD: 13 +.+.: subsys mutex#84 ->&k->k_lock FD: 1 BD: 13 ....: uverbs_ida.xa_lock FD: 3 BD: 13 +.+.: subsys mutex#85 ->&k->k_lock FD: 3 BD: 13 +.+.: subsys mutex#86 ->&k->k_lock FD: 1 BD: 13 +.+.: rds_ib_devices_lock FD: 1 BD: 13 +.+.: ib_nodev_conns_lock FD: 1 BD: 1 ....: _rs.lock#6 FD: 1 BD: 3 +...: smc_lgr_list.lock FD: 1 BD: 1 ....: _rs.lock#7 FD: 38 BD: 98 +.+.: &hugetlbfs_i_mmap_rwsem_key ->&obj_hash[i].lock ->pool_lock#2 ->&rq->__lock ->ptlock_ptr(page) ->quarantine_lock ->&meta->lock ->kfence_freelist_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 1 BD: 4987 .-.-: init_task.mems_allowed_seq.seqcount FD: 748 BD: 2 +.+.: (work_completion)(&smcibdev->port_event_work) ->&rxe->usdev_lock ->&lock->wait_lock ->&p->pi_lock ->&table->rwlock ->smc_lgr_list.lock FD: 749 BD: 12 +.+.: &device->compat_devs_mutex ->fs_reclaim ->&xa->xa_lock#15 ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->&sem->wait_lock ->&p->pi_lock ->sysfs_symlink_target_lock ->&c->lock ->&rq->__lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->subsys mutex#83 ->&rxe->usdev_lock ->&zone->lock ->&____s->seqcount ->&____s->seqcount#2 ->&n->list_lock ->remove_cache_srcu ->rcu_node_0 ->&rcu_state.expedited_wq ->batched_entropy_u8.lock ->kfence_freelist_lock ->&cfs_rq->removed.lock ->&lock->wait_lock FD: 1 BD: 12 ....: rdma_nets_rwsem.wait_lock FD: 1 BD: 5 +.+.: &device->unregistration_lock FD: 115 BD: 98 +.+.: &sb->s_type->i_mutex_key#21 ->&rq->__lock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->integrity_iint_lock ->tk_core.seq.seqcount ->fs_reclaim ->hugetlb_lock ->&c->lock ->&____s->seqcount ->&n->list_lock ->rcu_node_0 ->&____s->seqcount#2 ->&resv_map->lock ->&obj_hash[i].lock ->remove_cache_srcu ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock ->(console_sem).lock ->console_owner_lock ->console_owner ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 1 BD: 1 ....: _rs.lock#8 FD: 72 BD: 1 .+.+: sb_writers#14 ->mount_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#16 ->&wb->list_lock ->&rq->__lock FD: 87 BD: 98 +.+.: &hugetlb_fault_mutex_table[i] ->&resv_map->lock ->hugetlb_lock ->&rq->__lock ->&lock->wait_lock ->fs_reclaim ->stock_lock ->pool_lock#2 ->&anon_vma->rwsem ->ptlock_ptr(page) ->&____s->seqcount ->&mm->page_table_lock ->&c->lock ->rcu_node_0 ->&n->list_lock ->&rcu_state.expedited_wq ->&____s->seqcount#2 ->remove_cache_srcu ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->batched_entropy_u8.lock ->kfence_freelist_lock ->(console_sem).lock ->console_owner_lock ->console_owner ->pgd_lock ->key 
->pcpu_lock ->percpu_counters_lock FD: 4 BD: 100 +.+.: &resv_map->lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 3 +.+.: nfnl_grp_active_lock FD: 169 BD: 1 +.+.: (wq_completion)bond2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 64 +...: &bond->ipsec_lock FD: 1 BD: 4 +.+.: &q->instances_lock FD: 1 BD: 4 +...: &log->instances_lock FD: 33 BD: 1 ..-.: &(&bond->mcast_work)->timer FD: 167 BD: 3811 +.+.: (work_completion)(&(&bond->mcast_work)->work) ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->&rq->__lock ->rtnl_mutex.wait_lock ->&p->pi_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 44 BD: 65 +.+.: (work_completion)(&br->mcast_gc_work) ->&br->multicast_lock ->(&mp->timer) ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->krc.lock ->(&p->rexmit_timer) ->(&p->timer) ->&rq->__lock FD: 10 BD: 64 +...: &bond->mode_lock ->&c->lock ->pool_lock#2 FD: 1 BD: 64 ....: (&br->hello_timer) FD: 1 BD: 64 ....: (&br->topology_change_timer) FD: 1 BD: 64 ....: (&br->tcn_timer) FD: 1 BD: 64 ....: (&brmctx->ip4_mc_router_timer) FD: 1 BD: 64 ....: (&brmctx->ip4_other_query.timer) FD: 1 BD: 64 ....: (&brmctx->ip6_mc_router_timer) FD: 1 BD: 64 ....: (&brmctx->ip6_other_query.timer) FD: 101 BD: 1 +.+.: (wq_completion)bond1#3 ->(work_completion)(&(&slave->notify_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond2#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 2 BD: 7 +...: vsock_table_lock ->clock-AF_VSOCK FD: 52 BD: 3 +.+.: sk_lock-AF_CAIF ->&rq->__lock ->slock-AF_CAIF ->&obj_hash[i].lock ->&this->info_list_lock ->(console_sem).lock ->&ei->socket.wq.wait ->clock-AF_CAIF ->elock-AF_CAIF ->console_owner_lock ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock FD: 1 BD: 4 +...: slock-AF_CAIF FD: 1 BD: 8 ++..: clock-AF_VSOCK FD: 5 BD: 3 +.+.: sk_lock-AF_VSOCK ->slock-AF_VSOCK ->vsock_table_lock ->clock-AF_VSOCK ->rlock-AF_VSOCK FD: 1 BD: 4 +...: slock-AF_VSOCK FD: 169 BD: 1 +.+.: (wq_completion)bond2#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond3#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 4 ....: rlock-AF_VSOCK FD: 1 BD: 3 +...: rlock-AF_CAIF FD: 1 BD: 4 +...: clock-AF_CAIF FD: 1 BD: 4 ....: elock-AF_CAIF FD: 169 BD: 1 +.+.: (wq_completion)bond3#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 1 ....: net_ratelimit_state.lock FD: 38 BD: 73 +.-.: (&tw->tw_timer) ->&hashinfo->ehash_locks[i] ->&tcp_hashinfo.bhash[i].lock ->&obj_hash[i].lock ->stock_lock FD: 1 BD: 71 ....: fastopen_seqlock.seqcount FD: 1 BD: 99 ....: key#24 FD: 1 BD: 67 +...: _xmit_LOOPBACK#2 FD: 1 BD: 68 +...: nr_node_list_lock FD: 1 BD: 64 +.+.: isotp_notifier_lock FD: 1 BD: 64 +.+.: raw_notifier_lock FD: 2 BD: 109 +.+.: (work_completion)(flush) ->&list->lock#5 FD: 1 BD: 67 +...: &qdisc_xmit_lock_key FD: 1 BD: 67 +...: _xmit_TUNNEL#2 FD: 1 BD: 69 +.+.: &ping_table.lock FD: 13 BD: 64 +.+.: mrt_lock ->pool_lock#2 ->&dir->lock#2 ->&c->lock ->&n->list_lock FD: 1 BD: 6 .+..: ip_set_ref_lock FD: 1 BD: 65 ....: 
&rdev->dev_wait FD: 104 BD: 1 +.+.: &net->xfrm.xfrm_cfg_mutex ->&rq->__lock ->fs_reclaim ->pool_lock#2 ->&c->lock ->&n->list_lock ->&obj_hash[i].lock ->rlock-AF_NETLINK ->(console_sem).lock ->&net->xfrm.xfrm_policy_lock ->&lock->wait_lock ->&net->xfrm.xfrm_state_lock ->&cfs_rq->removed.lock ->&policy->lock ->&list->lock#35 ->&base->lock FD: 29 BD: 1 +.+.: sk_lock-AF_RDS ->&rq->__lock ->slock-AF_RDS FD: 1 BD: 67 +...: &vlan_netdev_xmit_lock_key FD: 1 BD: 2 +...: slock-AF_RDS FD: 1 BD: 4 +.+.: raw_sk_list.lock FD: 1 BD: 65 +.+.: devices_lock FD: 1 BD: 65 +...: &dev_addr_list_lock_key#5 FD: 1 BD: 67 +...: &qdisc_xmit_lock_key#2 FD: 44 BD: 3888 +...: &dev_addr_list_lock_key#3/2 ->&dev_addr_list_lock_key/1 ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock ->&c->lock FD: 1 BD: 2 +.+.: vlan_ioctl_mutex.wait_lock FD: 1 BD: 64 +.+.: (work_completion)(&port->wq) FD: 2 BD: 8 +.+.: &id_priv->handler_mutex ->&id_priv->lock FD: 1 BD: 8 ....: &x->wait#29 FD: 2 BD: 7 ....: rds_conn_lock ->rds_cong_lock FD: 44 BD: 7 +.+.: &tc->t_conn_path_lock ->clock-AF_INET6 ->&cp->cp_lock ->&c->lock ->pool_lock#2 FD: 1 BD: 112 +...: rds_tcp_tc_list_lock FD: 1 BD: 82 ..-.: &cp->cp_lock FD: 131 BD: 6 +.+.: (work_completion)(&(&cp->cp_send_w)->work) ->k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->pool_lock#2 ->&obj_hash[i].lock ->&cp->cp_lock ->&c->lock FD: 1 BD: 82 ....: &rm->m_rs_lock FD: 131 BD: 6 +.+.: (work_completion)(&(&cp->cp_recv_w)->work) ->k-sk_lock-AF_INET6 ->k-slock-AF_INET6 FD: 1 BD: 82 ..-.: &list->lock#27 FD: 142 BD: 6 +.+.: (work_completion)(&cp->cp_down_w) ->&cp->cp_cm_lock ->k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->pool_lock#2 ->&dir->lock ->&obj_hash[i].lock ->&sb->s_type->i_lock_key#8 ->&xa->xa_lock#7 ->&fsnotify_mark_srcu ->&list->lock#27 ->&cp->cp_lock ->(work_completion)(&(&cp->cp_conn_w)->work) FD: 1 BD: 7 +.+.: &cp->cp_cm_lock FD: 1 BD: 7 +.+.: (work_completion)(&(&cp->cp_conn_w)->work) FD: 1 BD: 97 ..-.: key#25 FD: 169 BD: 1 +.+.: (wq_completion)bond11 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond13 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond5#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond14 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond3#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond6#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 67 ....: (&local->dynamic_ps_timer) FD: 20 BD: 69 +.+.: &sta->ampdu_mlme.mtx ->&sta->lock FD: 1 BD: 69 +.+.: (work_completion)(&sta->ampdu_mlme.work) FD: 28 BD: 69 +.+.: (work_completion)(&sta->drv_deliver_wk) ->&rq->__lock FD: 32 BD: 68 +.-.: (&ifibss->timer) ->&rdev->wiphy_work_lock FD: 745 BD: 2 +.+.: 
(work_completion)(&(&rdev->dfs_update_channels_wk)->work) ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock FD: 169 BD: 1 +.+.: (wq_completion)bond15 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 28 BD: 67 +.+.: (work_completion)(&local->dynamic_ps_enable_work) ->&rq->__lock FD: 1 BD: 67 +.+.: (work_completion)(&sdata->recalc_smps) FD: 1 BD: 67 +.+.: (work_completion)(&link->csa_finalize_work) FD: 1 BD: 67 +.+.: (work_completion)(&link->color_change_finalize_work) FD: 1 BD: 67 +.+.: (work_completion)(&(&link->dfs_cac_timer_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond7 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond4#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond16 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond8 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond17 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond3#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond9 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond4#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond18 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond5#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond10 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond19 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 129 BD: 66 +.+.: k-sk_lock-AF_INET/1 ->k-slock-AF_INET ->pool_lock#2 ->&dir->lock ->fs_reclaim ->&queue->rskq_lock ->&h->lhash2[i].lock ->&tcp_hashinfo.bhash[i].lock ->&rq->__lock ->k-clock-AF_INET ->&c->lock ->&hashinfo->ehash_locks[i] ->tk_core.seq.seqcount ->&obj_hash[i].lock ->&base->lock ->slock-AF_INET ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock ->remove_cache_srcu ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 169 BD: 1 +.+.: (wq_completion)bond11#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond6#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond20 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond12 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond21 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 61 BD: 1 +.-.: (&peer->timer_new_handshake) ->&peer->endpoint_lock FD: 169 BD: 1 +.+.: 
(wq_completion)bond13#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond6#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond22 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond7#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond14#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond23 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond8#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond8#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond9#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond16#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 80 BD: 1 .+.+: kn->active#58 ->fs_reclaim ->stock_lock ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond20#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond13#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 4 +...: key#26 FD: 169 BD: 1 +.+.: (wq_completion)bond14#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond15#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond16#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 6 +.+.: ebt_mutex.wait_lock FD: 169 BD: 1 +.+.: (wq_completion)bond18#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond21#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond19#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond16#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond20#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond17#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond18#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond21#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond23#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond22#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond19#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond24 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond23#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond24#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond26 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond25 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 2 +.+.: nf_sockopt_mutex.wait_lock FD: 169 BD: 1 +.+.: (wq_completion)bond27 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond28 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond26#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond29 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond30 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond31 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond32 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond33 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond34 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond33#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond35 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond36 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond37 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond38 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond39 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond34#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond40 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond35#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 101 BD: 1 +.+.: 
(wq_completion)bond36#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond41 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond37#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond38#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond39#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond42 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond40#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond43 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond41#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond44 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond42#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond45 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond43#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond46 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond44#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond47 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond48 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond49 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond50 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond51 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond10#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond53 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond11#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond54 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond27#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond12#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond55 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond28#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond56 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond29#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond57 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond30#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond58 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond13#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond29#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond31#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond59 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond14#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond32#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond30#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond60 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond33#3 ->(work_completion)(&(&slave->notify_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond31#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond61 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond34#3 ->(work_completion)(&(&slave->notify_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond32#3 ->(work_completion)(&(&slave->notify_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond15#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond62 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond35#3 ->(work_completion)(&(&slave->notify_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond63 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond36#3 ->(work_completion)(&(&slave->notify_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond33#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond64 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond16#5 ->(work_completion)(&(&slave->notify_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond37#3 ->(work_completion)(&(&slave->notify_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond65 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond38#3 ->(work_completion)(&(&slave->notify_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond17#3 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond66 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond39#3 ->(work_completion)(&(&slave->notify_work)->work) FD: 33 BD: 1 ..-.: &(&conn->disc_work)->timer FD: 34 BD: 21 +.+.: (work_completion)(&(&conn->disc_work)->work) ->pool_lock#2 ->&list->lock#9 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount FD: 169 BD: 1 +.+.: (wq_completion)bond67 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond40#3 ->(work_completion)(&(&slave->notify_work)->work) ->&rq->__lock FD: 101 BD: 1 +.+.: (wq_completion)bond34#4 ->(work_completion)(&(&slave->notify_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond18#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond68 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond35#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond69 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond70 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond71 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond73 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond37#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 31 BD: 1 ..-.: net/ipv4/devinet.c:474 FD: 169 BD: 1 +.+.: (wq_completion)bond74 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond38#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond75 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 101 BD: 1 +.+.: (wq_completion)bond20#4 ->(work_completion)(&(&slave->notify_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond76 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond21#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond39#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond77 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond22#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond78 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond40#4 ->(work_completion)(&(&slave->notify_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond79 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond80 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond42#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond81 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond23#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond43#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond24#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond83 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond84 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond44#3 ->(work_completion)(&(&slave->notify_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond25#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond85 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond26#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond27#3 ->(work_completion)(&(&slave->notify_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond86 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond28#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond87 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond29#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond46#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond88 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond30#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond89 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond69#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond90 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond91 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond31#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond92 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond70#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond47#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond93 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond94 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond95 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond96 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond97 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond49#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond98 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond99 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond34#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond50#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond100 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond75#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond35#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond102 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond76#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond103 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 111 +.+.: freezer_mutex.wait_lock FD: 169 BD: 1 +.+.: (wq_completion)bond104 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond78#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond37#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond54#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond105 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond79#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: 
(wq_completion)bond55#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond106 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond38#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond65#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond80#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond107 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond81#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond108 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond57#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond82 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond39#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond109 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond58#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond112 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond59#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond83#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond60#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond84#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond113 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond114 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond42#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond115 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond43#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond116 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond117 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 
BD: 1 +.+.: (wq_completion)bond48#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond87#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond118 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond49#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond64#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond119 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond50#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond65#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond120 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond51#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond88#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 33 BD: 1 +.-.: (&pool->idle_timer) ->&pool->lock/1 ->&pool->lock FD: 169 BD: 1 +.+.: (wq_completion)bond121 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond122 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond89#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond123 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond125 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond124 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond126 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond127 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond128 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond91#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond129 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond131 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond130 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond94#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond132 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond95#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond133 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond96#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond134 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond135 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond136 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 14 BD: 1 +.-.: (t) ->&obj_hash[i].lock ->&base->lock FD: 31 BD: 1 ..-.: security/integrity/ima/ima_queue_keys.c:35 FD: 5 BD: 2 +.+.: (ima_keys_delayed_work).work ->ima_keys_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 169 BD: 1 +.+.: (wq_completion)bond98#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond138 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond139 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond99#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond141 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond140 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond101 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond142 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond143 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond145 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond146 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond147 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond148 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond149 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond103#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond150 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond151 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond94#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond152 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond110 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond153 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond105#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond154 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond97#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond113#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond98#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond114#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond99#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond106#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond156 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond115#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond100#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond107#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond116#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond157 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond101#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond108#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond158 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond117#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond102#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 
BD: 1 +.+.: (wq_completion)bond109#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond159 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond118#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond103#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond119#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond160 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond104#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond110#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond161 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond120#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond105#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond162 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond111 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond121#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond106#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond163 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond122#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond107#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond123#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond112#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond164 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond108#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond124#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond109#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond113#3 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond165 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond125#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond110#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond166 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond126#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond111#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond167 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond127#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond112#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond168 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond128#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond113#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond114#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond129#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond114#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond115#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond169 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond130#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond115#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond131#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond170 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond116#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond116#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: 
(wq_completion)bond171 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond117#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond133#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond118#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond172 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond134#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond135#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond119#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond173 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond117#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond120#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond136#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond174 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond118#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond121#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond137 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond175 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond122#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond138#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond176 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond123#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond119#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond139#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond177 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond124#3 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond140#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond125#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond178 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond141#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->rcu_node_0 ->&rcu_state.expedited_wq ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond120#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond126#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond179 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond142#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond121#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond127#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond180 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond143#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond122#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond128#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond181 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond144 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond123#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond129#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond145#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond146#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond124#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond131#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond147#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond148#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond132#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond133#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond125#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond149#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond134#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond126#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond150#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond135#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond127#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond151#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond136#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond152#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond137#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond153#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond129#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond138#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond154#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond139#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond155 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond140#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond156#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond141#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond157#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond142#3 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond131#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond158#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond67#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond143#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond159#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond144#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond132#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond160#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond145#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond68#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond161#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond133#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond146#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond162#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond134#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond147#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond163#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond148#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond164#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond69#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond149#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond165#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond70#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond135#4 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond150#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond166#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond71#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond136#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond151#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond72 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond167#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond152#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond137#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond138#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond168#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond153#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond73#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond169#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond154#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond155#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond170#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond171#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond75#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond139#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond156#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond172#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond157#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond140#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: 
(wq_completion)bond158#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond173#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond76#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond174#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond159#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond175#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond160#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond141#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond176#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond161#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond142#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond77#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond177#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond162#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond178#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond163#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond143#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond179#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond164#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond180#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond165#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond144#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond78#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond181#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond166#3 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond182 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond167#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond183 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond79#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond168#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond184 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond169#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond145#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond185 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond81#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond170#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond186 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond187 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond171#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond172#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond82#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond188 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond173#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond83#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond147#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond189 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond174#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond190 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond175#3 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond84#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond176#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond192 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond85#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond148#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond177#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond193 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond86#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond149#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond194 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond179#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond150#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond195 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond180#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond87#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond191 ->(work_completion)(&(&slave->notify_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond196 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond181#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond192#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond197 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond182#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond89#3 ->(work_completion)(&(&slave->notify_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond193#2 ->(work_completion)(&(&slave->notify_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond198 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond152#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond183#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond90#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 101 BD: 1 +.+.: (wq_completion)bond194#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond199 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond184#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond91#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond195#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond200 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond185#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 101 BD: 1 +.+.: (wq_completion)bond196#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond201 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond197#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond202 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond187#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond186#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond198#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond203 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond188#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond204 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond199#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond189#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond190#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond200#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond205 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond191#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond206 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond192#3 ->&rq->__lock 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond207 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond193#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond208 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond194#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond209 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond195#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond210 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond196#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond211 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond197#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond212 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond198#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond213 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 40 BD: 66 +.-.: (&mp->timer) ->&br->multicast_lock FD: 40 BD: 66 +.-.: (&p->timer) ->&br->multicast_lock FD: 1 BD: 66 ....: (&p->rexmit_timer) FD: 169 BD: 1 +.+.: (wq_completion)bond214 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond199#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond200#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond215 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond216 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond201#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond202#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond218 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond203#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond219 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond204#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond220 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond205#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond221 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond206#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond222 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond207#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond223 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond208#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond224 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond209#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond225 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond210#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond226 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond227 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond212#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond228 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond213#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond6#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond229 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond214#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond7#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond215#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond231 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond216#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond232 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond217 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond233 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond218#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond234 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond8#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond219#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond235 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond220#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond236 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond221#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond237 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond222#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond238 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond223#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond224#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond10#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond240 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond225#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond241 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond226#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond242 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond227#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond243 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond228#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond244 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond229#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond245 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond13#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond230 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond246 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond231#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond247 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond232#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond14#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond248 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond233#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond249 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond234#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond250 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond15#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond235#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond236#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond252 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond237#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond253 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond238#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) 
FD: 169 BD: 1 +.+.: (wq_completion)bond16#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond254 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond239 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond17#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond255 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond240#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond256 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond241#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond257 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond242#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond258 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond243#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond259 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond21#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond244#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond260 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond245#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond261 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 38 BD: 2 +.+.: (work_completion)(&pool->idle_cull_work) ->wq_pool_attach_mutex ->wq_pool_attach_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 169 BD: 1 +.+.: (wq_completion)bond226#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond246#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond262 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond247#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond263 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond248#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond227#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond22#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond264 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond249#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond228#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond265 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond23#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond250#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond266 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond251 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond24#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond229#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond267 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond252#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond25#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond268 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond253#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond254#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond270 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond230#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond26#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond255#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond271 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond256#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond27#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond272 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond257#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond231#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond273 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond258#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond28#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond232#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond259#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond274 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond29#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond233#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond260#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond275 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond30#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond261#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond276 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond262#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond277 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond31#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond263#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond278 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 
+.+.: (wq_completion)bond32#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond264#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond279 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond33#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond265#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond280 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond266#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond34#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond281 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond267#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond234#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond268#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond282 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond269 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond283 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond35#6 ->(work_completion)(&(&slave->notify_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond270#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond284 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond36#4 ->(work_completion)(&(&slave->notify_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond271#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond235#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond285 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond272#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond236#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond286 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond37#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 101 BD: 1 +.+.: (wq_completion)bond237#3 ->(work_completion)(&(&slave->notify_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond38#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond274#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond238#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond288 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond39#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond275#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond289 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 101 BD: 1 +.+.: (wq_completion)bond239#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond40#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond276#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond240#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond290 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond277#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond241#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond291 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond242#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond292 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond293 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond279#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond294 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond280#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond295 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond296 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond281#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond282#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond42#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond297 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond283#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond43#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond298 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond284#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond44#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond299 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond285#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond300 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond286#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond301 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond287 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond302 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond288#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond289#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond303 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond290#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond304 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond291#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond306 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond292#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond293#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond307 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond308 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond294#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond309 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond310 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond296#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond311 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond297#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond298#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond312 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond313 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond299#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond314 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond300#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond315 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond301#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond316 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond302#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond303#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond317 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond318 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond304#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond305 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond320 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond306#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond321 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond307#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond322 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond308#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond323 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond309#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond324 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond310#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond325 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond326 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond311#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond312#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond327 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond313#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond328 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond314#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond329 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond315#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond330 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond316#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond331 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond332 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond71#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond318#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond333 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond319 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond334 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond73#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond320#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond335 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond74#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond321#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond336 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond75#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond322#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond337 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond76#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond323#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond338 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond324#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond77#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond339 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond325#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond78#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond340 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond326#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond79#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond341 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond327#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond342 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond81#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond328#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond82#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond329#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond344 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond83#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond330#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond345 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond84#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond331#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond346 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond85#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond332#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond347 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond86#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond333#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond348 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond87#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond334#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 
BD: 1 +.+.: (wq_completion)bond349 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond88#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond335#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond350 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond89#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond336#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond351 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond90#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond337#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond352 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond91#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond338#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond353 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond339#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond92#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond354 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond340#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond93#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond355 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond341#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond94#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond356 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond342#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond95#3 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond357 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond343 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond96#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond358 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond344#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond97#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond359 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond345#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond98#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond360 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond346#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond99#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond361 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond347#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond100#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond362 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond348#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond363 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond349#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond102#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond364 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond350#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond365 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: 
(wq_completion)bond103#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond351#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond366 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond352#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond104#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond367 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond353#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond368 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond354#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond106#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond369 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond355#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond107#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond370 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond356#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond108#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond371 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond357#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond372 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond358#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond109#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond373 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond359#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) 
->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond110#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond374 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond360#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond111#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond375 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond361#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond112#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond376 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond362#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond113#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond377 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond363#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond378 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond114#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond364#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond379 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond115#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond365#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond380 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond116#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond366#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond381 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond367#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: 
(wq_completion)bond117#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond382 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond368#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond118#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond383 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond369#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond384 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond119#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond370#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond385 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond120#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond371#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond386 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond121#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond372#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond387 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond122#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond373#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond388 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond374#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond123#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond389 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond375#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond124#5 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond390 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond376#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond125#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond391 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond377#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond126#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond392 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond378#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond127#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond393 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond379#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond394 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond128#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond380#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond395 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond129#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond381#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond396 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond382#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond397 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond383#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond398 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond131#5 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond384#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond399 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond385#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond132#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond400 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond386#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond133#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond401 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond134#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond387#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond402 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond388#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond403 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond135#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond389#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond404 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond136#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond390#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond405 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond137#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond391#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond406 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond392#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 
BD: 1 +.+.: (wq_completion)bond138#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond407 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond139#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond393#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond408 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond394#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond141#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond409 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond395#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond410 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond142#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond396#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond411 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond143#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond397#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond412 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond144#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond398#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond413 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond145#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond399#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond414 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond146#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond400#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond415 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond401#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond147#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond402#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond403#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond416 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond417 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond148#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond404#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond418 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond149#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond419 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond150#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond406#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond151#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond407#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond421 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond408#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond422 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond409#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond152#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond423 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond410#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond424 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond153#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond411#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond154#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond425 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond412#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond155#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond426 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond156#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond427 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond413#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond157#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond428 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond414#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond158#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond429 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond415#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond430 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond159#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond416#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond431 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond160#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond417#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond432 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond161#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond418#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond433 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond162#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond419#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond434 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond420 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond435 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond421#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond163#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond436 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond422#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond164#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond437 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond423#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond165#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond438 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond424#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond166#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond439 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond425#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond167#4 ->&rq->__lock 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond440 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond426#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond168#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond441 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond427#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond169#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond442 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond428#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond170#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond443 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond429#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond444 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond445 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond171#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond431#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond446 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond172#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond432#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond447 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 1 BD: 4 +.+.: oom_adj_mutex.wait_lock FD: 169 BD: 1 +.+.: (wq_completion)bond433#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond448 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond434#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 
169 BD: 1 +.+.: (wq_completion)bond173#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond449 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond435#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond450 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond436#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond174#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond451 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond437#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond175#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond452 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond438#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond176#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond453 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond439#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond177#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond454 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond440#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond178#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond455 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond441#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond179#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond456 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond442#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond180#4 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond457 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond181#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond458 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond443#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond182#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond459 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond444#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond183#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond460 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond184#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond461 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 14 BD: 1 +.-.: (&tsc_sync_check_timer) ->&obj_hash[i].lock ->&base->lock FD: 169 BD: 1 +.+.: (wq_completion)bond445#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond185#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond462 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond463 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond446#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond186#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond464 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond187#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond465 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond447#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond188#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: 
(wq_completion)bond466 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond448#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond189#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond449#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond467 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond450#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond468 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond191#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond451#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond469 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond192#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond452#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond470 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond193#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond453#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond471 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond194#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond472 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond195#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond455#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond473 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond196#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond456#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: 
(wq_completion)bond474 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond457#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond475 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond197#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond476 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond458#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond198#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond477 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond459#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond199#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond478 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond200#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond479 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond460#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond480 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond461#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond481 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond462#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond482 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond201#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond463#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond202#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond464#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: 
(wq_completion)bond483 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond465#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond484 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond466#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond203#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond485 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond467#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond204#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond486 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond468#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond487 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond469#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond205#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond488 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond470#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond206#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond489 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond471#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond207#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond490 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond472#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond208#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond473#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: 
(wq_completion)bond491 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond209#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond474#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond492 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond210#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond475#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond493 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond476#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond211#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond494 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond477#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond495 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond478#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond212#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond479#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond496 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond480#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond497 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond481#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond213#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond498 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond482#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond499 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond483#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond500 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond484#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond501 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond485#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond214#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond502 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond486#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond215#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond503 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond487#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond216#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond504 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond488#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond505 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond217#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond489#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond506 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond218#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond490#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond507 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond219#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond491#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond508 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond220#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond492#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond509 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond221#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond493#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond510 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond494#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond511 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond223#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond495#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond224#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond496#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond512 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond225#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond497#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond513 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond226#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond498#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond514 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond227#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond515 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond228#4 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond499#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond229#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond500#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond516 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond230#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond501#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond517 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond502#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond231#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond518 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond503#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond232#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond519 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond504#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond520 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond505#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond521 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond506#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond522 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond233#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond507#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond523 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond508#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond234#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond524 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond235#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond509#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond525 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond236#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond510#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond526 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond237#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond511#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond527 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond512#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond528 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond513#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond514#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond238#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond529 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond515#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond239#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond530 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond240#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond531 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond517#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond241#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 33 BD: 1 +.-.: (&peer->timer_zero_key_material) FD: 79 BD: 7 +.+.: (work_completion)(&peer->clear_peer_work) ->&handshake->lock ->&peer->keypairs.keypair_update_lock FD: 169 BD: 1 +.+.: (wq_completion)bond532 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond518#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond242#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond533 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond519#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond534 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond243#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond520#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond535 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond521#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond536 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond522#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond244#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond537 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond523#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond538 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond524#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond539 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond525#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond245#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) 
->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond540 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond526#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond541 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond527#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond542 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond528#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond529#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond543 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond544 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond247#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond545 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond248#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond546 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond530#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond547 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond531#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond548 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond532#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond549 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond249#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond533#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond550 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond250#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) 
->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond534#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond551 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond251#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond535#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond552 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond252#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond536#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond553 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond537#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond253#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond554 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond538#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond254#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond555 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond539#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond556 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond540#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond557 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond255#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond541#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond256#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond542#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond558 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) 
->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond257#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond559 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond543#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond258#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond560 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond544#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond561 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond259#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond545#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond562 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond260#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond546#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond563 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond261#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond564 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond262#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond547#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond548#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond263#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond549#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond264#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond565 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond550#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock 
FD: 169 BD: 1 +.+.: (wq_completion)bond566 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond551#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond567 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond265#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond266#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond552#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 41 BD: 3 +.+.: sk_lock-AF_PPPOX ->slock-AF_PPPOX ->chan_lock ->&obj_hash[i].lock ->&rq->__lock ->clock-AF_PPPOX ->&pn->hash_lock ->rlock-AF_PPPOX FD: 1 BD: 4 +...: slock-AF_PPPOX FD: 1 BD: 4 +.+.: chan_lock FD: 1 BD: 4 +...: clock-AF_PPPOX FD: 1 BD: 4 ....: rlock-AF_PPPOX FD: 169 BD: 1 +.+.: (wq_completion)bond568 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 1 BD: 4963 ....: cid_lock FD: 169 BD: 1 +.+.: (wq_completion)bond553#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond267#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond554#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond569 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond268#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond555#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 216 BD: 3 +.+.: sk_lock-AF_TIPC ->&rq->__lock ->slock-AF_TIPC ->&obj_hash[i].lock ->&base->lock ->clock-AF_TIPC ->fs_reclaim ->pool_lock#2 ->&mm->mmap_lock ->&list->lock#28 ->&cfs_rq->removed.lock ->&c->lock ->&ei->socket.wq.wait ->&zone->lock ->&____s->seqcount ->&____s->seqcount#2 ->&n->list_lock ->tk_core.seq.seqcount ->&list->lock#5 FD: 35 BD: 4 +...: slock-AF_TIPC ->&list->lock#28 FD: 169 BD: 1 +.+.: (wq_completion)bond570 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 4 +...: clock-AF_TIPC FD: 169 BD: 1 +.+.: (wq_completion)bond556#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond571 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond557#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 272 BD: 1 +.+.: bpf_dispatcher_xdp.mutex ->pack_mutex ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->init_mm.page_table_lock ->bpf_lock ->text_mutex ->text_mutex.wait_lock ->&p->pi_lock ->&rq->__lock 
->cpu_hotplug_lock ->&obj_hash[i].lock ->&rnp->exp_wq[0] ->&rnp->exp_wq[1] ->rcu_state.exp_mutex.wait_lock ->&rnp->exp_lock ->rcu_state.exp_mutex ->bpf_dispatcher_xdp.mutex.wait_lock ->&cfs_rq->removed.lock FD: 169 BD: 1 +.+.: (wq_completion)bond572 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond269#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond270#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond573 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond271#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond558#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond574 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond272#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond559#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond273#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond560#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond575 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond274#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond561#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 64 +.+.: &tn->idrinfo->lock FD: 169 BD: 1 +.+.: (wq_completion)bond576 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond562#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond577 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond276#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond563#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond578 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 1 BD: 1 ....: _rs.lock#9 FD: 169 BD: 1 +.+.: (wq_completion)bond277#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: 
(wq_completion)bond564#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond579 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond278#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond565#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond566#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond580 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond567#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond280#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond581 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond568#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond569#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond281#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond570#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond282#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond582 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 169 BD: 1 +.+.: (wq_completion)bond571#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond572#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond583 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond573#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond584 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond574#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond585 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond575#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond586 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond283#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond587 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond284#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond576#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond285#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond577#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond588 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond578#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond589 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 1 BD: 10 +.+.: ima_extend_list_mutex.wait_lock FD: 169 BD: 1 +.+.: (wq_completion)bond286#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond579#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond590 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond580#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond287#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond591 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond581#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond288#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond592 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond582#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond289#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond583#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond594 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond290#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond584#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond595 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond291#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond585#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 1 BD: 3 +...: clock-AF_PHONET FD: 1 BD: 5 +.+.: &pnsocks.lock FD: 1 BD: 4 +.+.: resource_mutex FD: 1 BD: 3 ....: rlock-AF_PHONET FD: 169 BD: 1 +.+.: (wq_completion)bond596 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond292#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond586#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond293#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond587#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond588#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond597 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond589#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond294#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond598 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond295#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond599 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond590#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond591#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond600 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond592#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond601 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond602 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond296#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond594#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond603 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond604 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond595#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond297#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond605 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond298#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 1 BD: 2 +.+.: loop_validate_mutex.wait_lock FD: 169 BD: 1 +.+.: (wq_completion)bond606 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond596#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 1 BD: 1 ....: _rs.lock#10 FD: 1 BD: 1 +.+.: (work_completion)(&(&hinfo->gc_work)->work) FD: 1 BD: 1 +...: &hinfo->lock FD: 169 BD: 1 +.+.: (wq_completion)bond608 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond299#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond597#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond609 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond300#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond598#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond610 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond301#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond599#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: 
(wq_completion)bond611 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond302#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond600#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond612 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond303#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond601#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond613 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond304#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond602#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond614 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond305#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 34 BD: 3 +.+.: &chan->lock/1 ->sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP/1 ->slock-AF_BLUETOOTH-BTPROTO_L2CAP ->clock-AF_BLUETOOTH ->rlock-AF_BLUETOOTH ->wlock-AF_BLUETOOTH ->pool_lock#2 ->&rq->__lock ->&dir->lock ->&obj_hash[i].lock FD: 29 BD: 4 +.+.: sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP/1 ->&rq->__lock ->slock-AF_BLUETOOTH-BTPROTO_L2CAP FD: 169 BD: 1 +.+.: (wq_completion)bond603#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond306#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond604#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond615 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond605#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond616 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond307#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond606#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond308#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond607 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond617 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond309#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 31 BD: 3 +.+.: sk_lock-AF_QIPCRTR ->slock-AF_QIPCRTR ->&rq->__lock ->clock-AF_QIPCRTR ->rlock-AF_QIPCRTR FD: 1 BD: 4 +...: slock-AF_QIPCRTR FD: 1 BD: 4 +...: clock-AF_QIPCRTR FD: 1 BD: 4 ....: rlock-AF_QIPCRTR FD: 169 BD: 1 +.+.: (wq_completion)bond310#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond608#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond618 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond609#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond311#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond619 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond610#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond312#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond620 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond611#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond612#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond622 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond313#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond613#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond623 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond614#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond615#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond624 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond616#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: 
(wq_completion)bond625 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond314#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond617#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond315#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond618#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond626 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond316#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond619#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond627 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond620#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond628 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond317#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond621 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond629 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond318#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond622#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond630 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond319#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond623#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond631 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond320#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond624#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond632 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: 
(wq_completion)bond321#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond625#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond633 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 1 BD: 136 +.+.: rcu_state.exp_wake_mutex.wait_lock FD: 169 BD: 1 +.+.: (wq_completion)bond626#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond322#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond634 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond627#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond323#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond324#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond635 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond628#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond325#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond636 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond629#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond637 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond630#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond326#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond638 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond631#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond639 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond632#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond640 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond633#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond641 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond634#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond327#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond642 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond635#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond328#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond643 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond644 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond636#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond645 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond637#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond646 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond638#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond332#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond639#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond333#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond647 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond334#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond640#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond648 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond641#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond335#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: 
(wq_completion)bond649 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 11 BD: 4 +...: &pn->l2tp_tunnel_idr_lock ->&c->lock ->pool_lock#2 ->&obj_hash[i].lock ->&____s->seqcount#2 ->&____s->seqcount FD: 169 BD: 1 +.+.: (wq_completion)bond336#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond337#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond650 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond642#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond651 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond643#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond338#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond652 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond339#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond644#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond92#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond653 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond645#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond646#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond654 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond340#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond647#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond655 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond648#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond656 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond649#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: 
(wq_completion)bond657 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond650#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond341#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond93#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond658 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond651#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond94#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond659 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond652#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond95#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond342#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond660 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond653#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond96#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond661 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond654#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond97#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond343#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond662 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond655#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond663 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond344#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond656#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 
BD: 1 +.+.: (wq_completion)bond664 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond345#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond657#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond658#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond665 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond98#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond346#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond659#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond666 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond99#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond660#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond100#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond667 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond661#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond668 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond347#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond669 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond662#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond348#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond101#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond663#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond349#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond671 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond664#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond672 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond665#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond351#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond673 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond666#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond674 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond352#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond667#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond102#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond675 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond353#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond668#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond676 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond669#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond677 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond670 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond678 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond354#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond671#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond679 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond103#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: 
(wq_completion)bond672#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond680 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond104#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond673#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond105#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond355#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond681 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond356#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond106#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond357#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond674#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond682 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond358#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond675#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond683 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond359#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond676#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond684 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond360#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond677#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond361#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond678#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond685 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond679#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond362#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond686 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond680#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond363#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond687 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond681#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond682#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond688 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond364#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond107#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond683#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond689 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond684#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond365#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond690 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond366#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond691 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond108#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond685#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond692 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond109#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond110#5 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond686#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond694 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond687#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond367#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond688#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond695 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond689#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond111#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond696 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond690#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond112#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond697 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond691#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond369#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond113#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond698 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond692#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond699 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond693 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond370#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond700 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond114#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 
169 BD: 1 +.+.: (wq_completion)bond371#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond701 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond694#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond702 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond115#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond703 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond695#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond704 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond696#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond372#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond116#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond697#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond706 ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond373#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond698#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond374#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond699#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond707 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond375#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond700#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond708 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond376#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond701#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond709 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond710 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond702#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond711 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond703#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond377#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond117#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond712 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond704#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond118#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond713 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond714 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond705 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond715 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond706#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond378#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond707#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond716 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond708#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond119#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond717 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond379#3 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond710#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond718 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond719 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond711#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond120#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond712#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond720 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond713#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond721 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond714#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond722 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond715#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond723 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond380#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond716#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond717#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond121#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond724 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond382#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond718#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond122#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond383#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond719#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond725 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond720#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond726 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond721#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond727 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond723#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond722#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond384#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond728 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond729 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond724#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond385#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond730 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond725#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond731 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond726#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond732 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond733 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond727#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond728#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond734 ->(work_completion)(&(&slave->notify_work)->work) ->&rq->__lock ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond729#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond735 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond124#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond736 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond730#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond731#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond732#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond125#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond733#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond734#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond735#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond736#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond737 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond387#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond738 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond737#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond126#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond388#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond389#3 ->(work_completion)(&(&slave->notify_work)->work) ->&rq->__lock ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond739 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond390#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond738#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond740 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: 
(wq_completion)bond127#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond739#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond741 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond740#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond391#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond742 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond128#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond741#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond743 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond742#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond744 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond129#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond743#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond130#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond392#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond744#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond745 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond745#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond746 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond393#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond746#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond747 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond394#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 
169 BD: 1 +.+.: (wq_completion)bond747#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond748 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond395#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond748#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond749 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond396#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond749#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond750 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond750#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond131#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond751 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond751#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond397#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond752 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond752#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond132#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond753 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond398#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond753#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond133#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond754 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond754#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond399#3 ->&rq->__lock 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond755 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock FD: 169 BD: 1 +.+.: (wq_completion)bond755#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond134#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond756 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond400#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond756#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond757 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond401#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond757#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond135#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond402#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond136#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond758 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond759 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond137#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond759#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond760 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond403#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond138#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond760#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond761 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond404#3 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond762 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond762#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond405#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond139#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond763 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond140#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond763#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond764 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond765 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond765#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond407#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond766 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond766#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond408#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond767 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond767#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond768 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond141#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->rcu_node_0 FD: 169 BD: 1 +.+.: (wq_completion)bond768#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond769 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond409#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond770 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: 
(wq_completion)bond769#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond770#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond771 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond142#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond772 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond771#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond772#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond773 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond774 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond410#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond143#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond773#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond774#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond775 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond411#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond775#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond776 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond412#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond777 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond776#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond778 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond777#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond778#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) 
->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond779 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond780 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond781 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond779#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond782 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond780#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond783 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond781#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond144#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond413#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond784 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond145#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond785 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond414#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond782#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond146#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond415#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond416#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond786 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond787 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond783#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond788 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond417#3 ->&rq->__lock 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond784#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond147#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond789 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond148#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond785#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond786#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond418#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond787#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond419#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond788#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond149#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond790 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond789#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond790#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond420#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond791 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond791#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond792 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond793 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond421#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond792#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond794 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: 
(wq_completion)bond422#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond795 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond423#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond796 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond793#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond797 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond794#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond795#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond798 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond150#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond424#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond799 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond800 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond151#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond796#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond801 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond797#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond802 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond798#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond425#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond804 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond799#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond805 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond800#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond806 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond801#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond426#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond807 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond808 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond152#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond803 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond427#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond804#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond153#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond809 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond805#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond428#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond810 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond811 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond812 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond806#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond154#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond813 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond807#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond429#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond814 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond815 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond816 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond808#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond817 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond155#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond818 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond809#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond430#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond156#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond819 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond810#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond820 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond431#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond821 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond157#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond811#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond822 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond812#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond823 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond432#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond158#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond824 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond433#3 ->&rq->__lock 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond159#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond813#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond434#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond814#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond825 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond435#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond160#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond815#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond826 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond816#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond827 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond817#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond436#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond437#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond818#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond819#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond828 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond820#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond821#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond829 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond830 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond161#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond822#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->rcu_node_0 FD: 169 BD: 1 +.+.: (wq_completion)bond831 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond439#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond823#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond824#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond832 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond825#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond833 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond440#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond834 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond441#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond826#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond835 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond442#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond443#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond827#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond836 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond828#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond837 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond829#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond162#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond838 
->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond830#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond839 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond831#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond840 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond163#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond832#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond841 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond164#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond842 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond833#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond165#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond834#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond843 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond444#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond166#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond445#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond835#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond167#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond844 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond836#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond845 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond837#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) 
->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond846 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond839#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond838#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond847 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond446#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond848 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond447#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond849 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond448#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond850 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond840#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond449#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond851 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond450#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond852 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond841#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond168#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond452#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond453#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond853 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond169#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond842#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond170#5 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond854 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond843#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond171#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond855 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond844#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond172#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond856 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond173#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond845#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond454#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond857 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond858 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond174#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond859 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond846#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond860 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond847#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond861 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond848#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond862 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond849#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond863 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond864 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond850#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond175#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond865 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond176#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond455#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond866 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond456#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond851#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond177#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond867 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond852#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond457#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond868 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond853#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond458#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond459#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond869 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond460#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond854#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond871 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond855#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond872 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: 
(wq_completion)bond873 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond856#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond874 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond857#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond875 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond858#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond178#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond859#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond876 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond860#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond461#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond179#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond861#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond180#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond462#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond877 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond463#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond862#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond464#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond863#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond181#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond879 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond880 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond864#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond465#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond865#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond881 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond466#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond866#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond182#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond867#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond882 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond868#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond883 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond884 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond183#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond870 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond869#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond885 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond184#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond871#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond467#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond185#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond468#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond872#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: 
(wq_completion)bond886 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond873#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond470#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond874#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond887 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond471#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond875#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond888 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond472#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond876#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond889 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond186#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond473#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond890 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond474#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond891 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond475#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond892 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond877#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond476#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond893 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond477#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond894 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond878 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond478#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond479#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond895 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond480#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 5 +...: &list->lock#28 FD: 169 BD: 1 +.+.: (wq_completion)bond896 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond879#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond481#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond482#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond897 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond483#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond898 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond880#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond899 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond881#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond900 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond882#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond901 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond883#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond902 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond884#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond903 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: 
(wq_completion)bond484#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond904 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond485#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond885#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond905 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond886#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond906 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond887#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond907 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond888#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond486#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond908 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond487#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond909 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond910 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond911 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond488#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond912 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond889#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond913 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond890#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond914 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond891#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock 
FD: 169 BD: 1 +.+.: (wq_completion)bond489#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond915 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond892#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond490#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond916 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond917 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond893#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond918 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond919 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond492#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond920 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond894#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond921 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 1 BD: 3 +...: &xs->map_list_lock FD: 1 BD: 3 +.+.: &xs->mutex FD: 1 BD: 3 +...: clock-AF_XDP FD: 169 BD: 1 +.+.: (wq_completion)bond895#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond493#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond922 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond923 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond896#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond924 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond897#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond495#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond925 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: 
(wq_completion)bond898#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond496#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond926 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond927 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond899#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond900#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond497#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond928 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond901#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond929 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond498#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond930 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond902#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond903#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond499#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond904#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond931 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond905#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 28 BD: 3 +.+.: pfkey_mutex ->&rq->__lock FD: 1 BD: 3 +...: dgram_lock FD: 1 BD: 3 +...: clock-AF_IEEE802154 FD: 1 BD: 3 ....: rlock-AF_IEEE802154 FD: 1 BD: 66 ....: &f->f_owner.lock FD: 169 BD: 1 +.+.: (wq_completion)bond932 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 1 BD: 3 +...: clock-AF_KEY FD: 1 BD: 3 ....: wlock-AF_KEY FD: 1 BD: 3 ....: rlock-AF_KEY FD: 169 BD: 1 +.+.: (wq_completion)bond906#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond933 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond907#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond934 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond908#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond935 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond909#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond501#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond910#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond936 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond911#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond937 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond912#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond938 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond913#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond939 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond502#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond940 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond503#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond915#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond941 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond504#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond916#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond942 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond917#2 ->&rq->__lock 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond943 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond918#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond919#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond944 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond945 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond920#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond921#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond946 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond947 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond948 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond922#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond949 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond923#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond950 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond951 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond924#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond953 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond925#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond954 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond505#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 1 ....: &wq#4 FD: 1 BD: 1 +.+.: &s->lock FD: 169 BD: 1 +.+.: (wq_completion)bond926#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 3 ....: wlock-AF_PPPOX FD: 169 BD: 1 +.+.: (wq_completion)bond927#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond928#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond955 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond506#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond929#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond930#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 84 BD: 3 +.+.: sk_lock-AF_PHONET ->&rq->__lock ->slock-AF_PHONET ->&pnsocks.lock ->resource_mutex ->&obj_hash[i].lock ->port_mutex#2 ->fs_reclaim ->&c->lock ->&n->list_lock ->pool_lock#2 FD: 1 BD: 4 +...: slock-AF_PHONET FD: 1 BD: 3 ....: &list->lock#29 FD: 169 BD: 1 +.+.: (wq_completion)bond956 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond507#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond508#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond957 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond509#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond931#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond958 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond932#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond510#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond933#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond959 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond934#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond935#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond511#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond960 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond961 ->&rq->__lock 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond936#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond512#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond962 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond937#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond938#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond963 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond964 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond939#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond965 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond513#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond940#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond966 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond514#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond941#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 4 +...: &pernet->lock FD: 169 BD: 1 +.+.: (wq_completion)bond942#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond943#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond515#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond944#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond967 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 78 BD: 3 +.+.: nlk_cb_mutex-NETFILTER ->&rq->__lock ->fs_reclaim ->pool_lock#2 ->&c->lock ->&n->list_lock ->&obj_hash[i].lock ->rlock-AF_NETLINK ->&____s->seqcount FD: 169 BD: 1 +.+.: (wq_completion)bond945#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 1 ....: _rs.lock#11 FD: 153 BD: 3 +.+.: &journal->j_barrier ->rcu_node_0 ->&rcu_state.expedited_wq ->&rq->__lock 
->&journal->j_state_lock ->&journal->j_wait_commit ->&journal->j_wait_done_commit ->&journal->j_list_lock ->&journal->j_checkpoint_mutex ->jbd2_handle ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 169 BD: 1 +.+.: (wq_completion)bond946#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond968 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond516#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond947#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond969 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond517#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond970 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond948#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond518#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond949#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 1 BD: 4513 ....: key#27 FD: 1 BD: 3 +.+.: &vmpr->sr_lock FD: 3 BD: 2 +.+.: (work_completion)(&vmpr->work) ->&vmpr->sr_lock ->&vmpr->events_lock FD: 1 BD: 3 +.+.: &vmpr->events_lock FD: 35 BD: 1 +.+.: &cache->alloc_lock ->swap_avail_lock ->&rq->__lock ->&p->lock#2 FD: 51 BD: 1 +.+.: shmem_swaplist_mutex ->&rq->__lock ->&xa->xa_lock#19 ->&info->lock ->&p->lock#2 ->&xa->xa_lock#7 FD: 6 BD: 4470 ....: &xa->xa_lock#19 ->pool_lock#2 ->&ctrl->lock ->key#27 ->&obj_hash[i].lock FD: 30 BD: 98 +.+.: mutex ->scomp_scratch.lock ->&pool->lock#3 ->&____s->seqcount ->pool_lock#2 ->&rq->__lock FD: 1 BD: 99 +.+.: scomp_scratch.lock FD: 4 BD: 4471 +.+.: &pool->lock#3 ->&obj_hash[i].lock FD: 5 BD: 4469 +.+.: &tree->lock ->&pool->lock#3 ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 4471 ....: &ctrl->lock FD: 1 BD: 4472 +.+.: mmlist_lock FD: 3 BD: 4486 ..-.: lock#10 ->&lruvec->lru_lock FD: 32 BD: 1 +.+.: percpu_charge_mutex ->&rq->__lock ->stock_lock ->&p->pi_lock FD: 3 BD: 2 +.+.: (work_completion)(&({ do { const void *__vpp_verify = (typeof((&memcg_stock) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&memcg_stock))) *)((&memcg_stock)))); (typeof((typeof(*((&memcg_stock))) *)((&memcg_stock)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->stock_lock FD: 11 BD: 4472 +.+.: &cache->free_lock ->&p->lock#2 FD: 169 BD: 1 +.+.: (wq_completion)bond971 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond950#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond519#3 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond972 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond951#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond520#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond973 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond952 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond521#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond974 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 1 BD: 1 +...: &list->lock#30 FD: 169 BD: 1 +.+.: (wq_completion)bond975 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond953#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond522#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond976 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond954#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond977 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond955#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond524#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond978 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond525#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond979 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond956#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 1 BD: 1 ....: _rs.lock#12 FD: 2 BD: 1 +...: &list->lock#31 ->rlock-AF_INET6 FD: 169 BD: 1 +.+.: (wq_completion)bond980 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond526#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond981 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond957#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond527#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond982 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond958#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond528#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond983 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond959#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond984 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond960#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond529#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond961#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond985 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond962#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond986 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 1 BD: 2 ..-.: key#28 FD: 169 BD: 1 +.+.: (wq_completion)bond963#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond530#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond987 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond964#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond531#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond988 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 82 BD: 1 +.-.: k-slock-AF_INET/1 ->&obj_hash[i].lock ->tk_core.seq.seqcount ->&c->lock ->pool_lock#2 ->slock-AF_INET ->&____s->seqcount ->&hashinfo->ehash_locks[i] ->&base->lock ->&n->list_lock ->&tcp_hashinfo.bhash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond965#2 ->&rq->__lock 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond966#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond989 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond967#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond532#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond968#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond533#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond990 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond969#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond534#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond991 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond970#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond535#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond992 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond993 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond971#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond994 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond972#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond537#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond995 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond973#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond996 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond997 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: 
(wq_completion)bond974#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond998 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond975#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond999 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond538#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond976#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1000 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 1 BD: 69 +.-.: &r->producer_lock#3 FD: 169 BD: 1 +.+.: (wq_completion)bond1001 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond977#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond1002 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 1 BD: 64 +...: &pmc->lock FD: 169 BD: 1 +.+.: (wq_completion)bond1003 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond539#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond978#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond187#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1004 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond979#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond540#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1005 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1006 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond980#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond188#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1007 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: 
(wq_completion)bond981#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond189#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond1008 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond541#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1009 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1010 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond982#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond542#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1011 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1012 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond983#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond984#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1013 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond985#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond543#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1014 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond986#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond987#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond190#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1016 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1017 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond544#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond988#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1018 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1019 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond545#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond989#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1020 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1021 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1022 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond546#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond990#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond547#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond991#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond548#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond992#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1023 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond191#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond549#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond993#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond192#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1024 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond994#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond550#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1025 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) 
->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1026 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond995#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond996#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1027 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 4 +...: smc_v4_hashinfo.lock FD: 1 BD: 3 +.+.: (work_completion)(&smc->connect_work) FD: 267 BD: 3 +.+.: sk_lock-AF_SMC ->&rq->__lock ->slock-AF_SMC ->smc_v4_hashinfo.lock ->clock-AF_SMC ->&smc->clcsock_release_lock ->k-clock-AF_INET FD: 1 BD: 4 +...: slock-AF_SMC FD: 263 BD: 4 +.+.: &smc->clcsock_release_lock ->k-sk_lock-AF_INET ->k-slock-AF_INET ->pool_lock#2 ->&dir->lock ->&obj_hash[i].lock ->stock_lock ->&sb->s_type->i_lock_key#8 ->&xa->xa_lock#7 ->&fsnotify_mark_srcu ->&mm->mmap_lock ->fs_reclaim ->&net->smc.mutex_fback_rsn ->k-clock-AF_INET ->&c->lock ->&rq->__lock FD: 1 BD: 4 +...: clock-AF_SMC FD: 169 BD: 1 +.+.: (wq_completion)bond1028 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond194#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond997#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond998#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond551#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond999#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond195#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1000#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1029 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1001#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond196#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond552#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond553#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond554#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1002#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1003#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond555#3 ->rcu_node_0 ->&rcu_state.expedited_wq ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1004#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond556#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1030 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond557#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1005#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond197#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond558#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 1 BD: 3 +...: raw_lock FD: 169 BD: 1 +.+.: (wq_completion)bond1006#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1031 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 1 BD: 4 +.+.: nf_conntrack_mutex.wait_lock FD: 169 BD: 1 +.+.: (wq_completion)bond1007#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1032 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1008#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1009#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1033 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1034 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1010#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1011#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond199#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond560#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 4 BD: 4 +.-.: _xmit_NONE#2 ->&obj_hash[i].lock ->pool_lock#2 FD: 169 BD: 1 
+.+.: (wq_completion)bond1012#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 44 BD: 3888 +...: &macsec_netdev_addr_lock_key#2/2 ->&dev_addr_list_lock_key/1 FD: 169 BD: 1 +.+.: (wq_completion)bond562#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1013#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond563#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond200#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1035 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1014#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond564#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1015 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond201#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1036 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond202#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1016#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond203#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1017#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1037 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1018#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1038 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1039 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1019#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond566#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1020#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: 
(wq_completion)bond204#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond205#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond206#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1021#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1040 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1041 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1022#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1023#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond207#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1042 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1043 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1024#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1044 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1045 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1025#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1046 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1026#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1047 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1027#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1048 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1028#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1049 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1029#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1030#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1050 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1031#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1051 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1032#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1052 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond570#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1053 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond572#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1033#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1054 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond208#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1034#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 31 BD: 1 ..-.: &(&hctx->run_work)->timer FD: 169 BD: 1 +.+.: (wq_completion)bond1035#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1036#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1055 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1056 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1037#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1057 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1038#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1058 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1039#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond209#4 
->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1059 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1040#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1041#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1060 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1061 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond210#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1062 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond211#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1063 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond575#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1042#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1064 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1043#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1065 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1044#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond212#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1066 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1045#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1067 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1046#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 1 BD: 13 +.+.: (work_completion)(&(&hdev->interleave_scan)->work) FD: 28 BD: 22 +.+.: (work_completion)(&(&conn->id_addr_timer)->work) ->&rq->__lock FD: 28 BD: 21 +.+.: (work_completion)(&(&conn->auto_accept_work)->work) ->&rq->__lock FD: 1 BD: 21 +.+.: (work_completion)(&(&conn->idle_work)->work) FD: 169 BD: 1 
+.+.: (wq_completion)bond1068 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1048#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1047#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1049#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1069 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1070 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond213#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1072 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond214#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1050#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1051#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1073 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1053#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1075 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond215#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1054#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1076 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1055#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond578#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1077 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1078 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1057#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 1 BD: 4 +.+.: sco_sk_list.lock FD: 29 BD: 3 +.+.: sk_lock-AF_BLUETOOTH-BTPROTO_SCO ->slock-AF_BLUETOOTH-BTPROTO_SCO ->&rq->__lock FD: 1 
BD: 4 +...: slock-AF_BLUETOOTH-BTPROTO_SCO FD: 169 BD: 1 +.+.: (wq_completion)bond1079 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1059#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1058#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond580#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 28 BD: 1 +.+.: nfnl_subsys_none ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1060#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond581#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 85 BD: 4 +.+.: (work_completion)(&(&local->roc_work)->work) ->&rq->__lock ->&local->mtx FD: 33 BD: 1 ..-.: &(&local->roc_work)->timer FD: 169 BD: 1 +.+.: (wq_completion)bond153#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond154#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1080 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond155#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1061#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1081 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond156#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond217#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond218#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond157#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1082 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1083 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond219#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond220#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1084 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: 
(wq_completion)bond1085 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1086 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1062#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1087 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1063#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond158#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1088 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1064#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond221#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond159#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond160#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1089 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond1065#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond222#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1090 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1091 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1066#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1092 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1093 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond586#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1067#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1094 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond223#4 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond161#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond587#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond162#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1068#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond163#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond589#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1069#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1095 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1070#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond590#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1071 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond591#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1072#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 78 BD: 1 +.+.: &audit_cmd_mutex.lock ->&rq->__lock ->fs_reclaim ->&c->lock ->pool_lock#2 ->rlock-AF_NETLINK FD: 169 BD: 1 +.+.: (wq_completion)bond592#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1073#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1074 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1076#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 4 ....: &list->lock#32 FD: 1 BD: 3 +...: clock-AF_NETROM FD: 33 BD: 3 +.+.: sk_lock-AF_NETROM ->slock-AF_NETROM ->&rq->__lock ->&obj_hash[i].lock ->wlock-AF_NETROM ->&list->lock#32 ->nr_list_lock ->rlock-AF_NETROM FD: 1 BD: 4 +...: slock-AF_NETROM FD: 1 BD: 4 ....: wlock-AF_NETROM FD: 1 BD: 4 ....: rlock-AF_NETROM FD: 169 BD: 1 +.+.: (wq_completion)bond224#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1100 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1078#2 ->&rq->__lock 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond225#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond593 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1079#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1101 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond226#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond164#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond227#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond165#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1081#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond228#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond594#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1103 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond229#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond595#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1083#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond596#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond166#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1105 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1107 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1084#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond230#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1108 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond167#6 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1109 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1085#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond168#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1110 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1086#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond169#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1087#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond232#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1111 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1112 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1089#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1114 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond598#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond599#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1091#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond600#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond601#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1117 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1092#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1093#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond233#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond170#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: 
(wq_completion)bond1097 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1095#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond234#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond235#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1099 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond603#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond236#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1122 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond237#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond238#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1100#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1124 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond171#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond604#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond605#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond173#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond172#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond606#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond174#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond607#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond175#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1130 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond241#5 ->(work_completion)(&(&slave->notify_work)->work) ->&rq->__lock 
->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1132 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1133 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1134 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond176#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond609#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond242#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond610#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond611#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond613#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond612#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1138 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1110#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1140 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond614#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1111#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1144 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1142 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond617#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond615#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond616#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond243#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1146 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond179#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) 
FD: 169 BD: 1 +.+.: (wq_completion)bond178#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond618#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond619#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond620#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond621#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond244#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1117#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1119 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond180#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond622#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond245#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond181#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1122#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond182#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond623#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond246#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond247#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond183#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond244#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond624#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond245#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond184#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1123 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1149 
->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond246#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond185#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond625#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1150 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond247#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond186#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1124#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond626#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1151 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond248#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond188#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1126 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1155 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1127 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond628#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond249#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 116 +.+.: jump_label_mutex.wait_lock FD: 169 BD: 1 +.+.: (wq_completion)bond1157 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond629#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1159 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1161 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1128 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond189#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: 
(wq_completion)bond1163 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 8 ....: &sem->waiters FD: 169 BD: 1 +.+.: (wq_completion)bond1130#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1165 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1131 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond190#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1132#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 82 BD: 1 +.-.: (&msk->sk.icsk_retransmit_timer) ->slock-AF_INET FD: 169 BD: 1 +.+.: (wq_completion)bond1134#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1167 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1136 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond631#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond191#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1169 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond632#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 8 BD: 3 +.+.: sk_lock-AF_X25 ->slock-AF_X25 ->wlock-AF_X25 ->&list->lock#33 ->&obj_hash[i].lock ->x25_list_lock ->rlock-AF_X25 FD: 1 BD: 4 +...: slock-AF_X25 FD: 1 BD: 4 ....: wlock-AF_X25 FD: 1 BD: 4 ....: &list->lock#33 FD: 1 BD: 4 ....: rlock-AF_X25 FD: 169 BD: 1 +.+.: (wq_completion)bond1138#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1171 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 66 ....: key#29 FD: 169 BD: 1 +.+.: (wq_completion)bond192#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1140#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond193#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond634#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1142#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1173 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: 
(wq_completion)bond1175 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1144#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond635#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1177 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1146#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond636#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1148 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1179 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond637#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1150#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1181 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1152 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond638#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1183 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond639#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1185 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1154 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond640#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond641#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1187 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond642#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond643#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1189 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1157#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1191 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1159#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond645#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1193 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1160 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond646#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1195 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1162 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond648#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1164 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1197 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 2 +.+.: bpf_dispatcher_xdp.mutex.wait_lock FD: 169 BD: 1 +.+.: (wq_completion)bond649#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1199 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1166 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond650#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1201 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1168 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond651#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond652#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1203 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1170 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond194#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1205 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond653#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1172 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond195#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1207 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 9 BD: 3 +.+.: sk_lock-AF_AX25 ->slock-AF_AX25 ->clock-AF_AX25 ->ax25_list_lock ->&obj_hash[i].lock ->&list->lock#34 ->rlock-AF_AX25 ->wlock-AF_AX25 FD: 1 BD: 4 +...: slock-AF_AX25 FD: 1 BD: 4 +...: clock-AF_AX25 FD: 1 BD: 4 +...: ax25_list_lock FD: 1 BD: 4 ....: &list->lock#34 FD: 1 BD: 4 ....: rlock-AF_AX25 FD: 1 BD: 4 ....: wlock-AF_AX25 FD: 1 BD: 13 +.+.: (work_completion)(&data->fib_flush_work) FD: 28 BD: 13 +.+.: &region->snapshot_lock ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1209 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1175#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1211 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond654#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1177#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond655#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 3 BD: 4 +.+.: port_mutex#2 ->local_port_range_lock.seqcount ->&pnsocks.lock FD: 1 BD: 5 ....: local_port_range_lock.seqcount FD: 169 BD: 1 +.+.: (wq_completion)bond1213 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond656#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1179#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond657#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1215 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond658#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1181#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond659#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1217 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond660#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1183#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 
169 BD: 1 +.+.: (wq_completion)bond661#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1219 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond662#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1185#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond663#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1221 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond664#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1187#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond665#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond666#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1223 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1189#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond667#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond668#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1225 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1191#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond669#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond670#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1227 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1193#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond671#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond672#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1229 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1195#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: 
(wq_completion)bond673#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond674#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1197#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1231 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond675#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond676#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1233 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1199#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 79 BD: 65 +.+.: &data->nh_lock ->fs_reclaim ->&c->lock ->&n->list_lock ->pool_lock#2 FD: 169 BD: 1 +.+.: (wq_completion)bond677#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond678#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1235 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1201#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond679#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond680#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1237 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1203#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond681#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond682#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1239 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1205#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond683#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond684#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1207#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1241 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond686#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1209#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1243 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond687#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond688#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1211#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1245 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 28 BD: 5 +.+.: &net->smc.mutex_fback_rsn ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond689#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond690#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1247 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1213#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1215#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond691#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1249 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond692#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1250 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond693#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1217#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond694#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1252 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 66 +..-: &____s->seqcount#13 FD: 169 BD: 1 +.+.: (wq_completion)bond695#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1253 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 2 BD: 1 +.-.: (&policy->timer) ->&policy->lock FD: 1 BD: 3 ++.-: &policy->lock FD: 169 BD: 1 +.+.: (wq_completion)bond1219#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond696#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1254 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond697#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1255 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1221#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond698#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1256 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1257 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond699#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1258 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1223#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond700#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1259 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond701#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1260 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1225#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond702#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1261 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond703#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1262 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1227#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond704#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1263 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond705#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1229#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1264 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond706#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 2 ....: &list->lock#35 FD: 169 BD: 1 +.+.: (wq_completion)bond1230 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1265 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond707#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1266 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond708#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1267 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1232 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond709#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1268 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1233#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond710#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1269 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1234 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond711#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1270 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond712#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1236 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1271 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond713#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1272 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1237#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1273 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1238 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond715#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 81 BD: 3 +.+.: crypto_default_null_skcipher_lock ->crypto_alg_sem ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock FD: 169 BD: 1 +.+.: (wq_completion)bond1274 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1239#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond716#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1240 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1275 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond717#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1241#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1276 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond718#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1242 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1277 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond719#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1243#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1278 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond720#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1244 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1279 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1245#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond721#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1280 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: 
(wq_completion)bond1246 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond722#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1281 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1247#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond723#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1282 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1248 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond724#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1283 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1249#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond725#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1284 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1250#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond726#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1285 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1251 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond727#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1286 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1252#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond728#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1253#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1287 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond729#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1254#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1288 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond730#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1255#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond731#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1289 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1256#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1290 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond732#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1257#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1291 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1258#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1292 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond734#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1259#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond249#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1293 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond735#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1260#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1294 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond736#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1261#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1295 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond737#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1262#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond250#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1296 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond738#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1263#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1297 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond739#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1264#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1298 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond740#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1265#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond196#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1299 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond741#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1266#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1300 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond197#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond742#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1267#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1301 ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1268#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1302 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond198#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond744#3 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1269#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1303 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1270#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1304 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond746#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1271#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1305 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond747#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1272#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1306 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond748#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1273#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1307 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond749#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1274#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1308 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond750#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1275#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1309 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond751#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1276#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1310 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond752#3 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1277#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1311 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond753#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1278#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1312 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond754#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1279#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1313 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond755#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1280#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1314 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond756#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1315 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond757#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1282#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1316 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1283#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1317 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1284#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond758#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1318 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1285#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond759#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock 
FD: 169 BD: 1 +.+.: (wq_completion)bond1319 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1286#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond760#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1320 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1287#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond761#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1321 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1288#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond762#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1322 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond251#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1289#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1323 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1290#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1324 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond763#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1291#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1325 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1292#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1326 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond765#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1293#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond766#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1294#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1295#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1296#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond252#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1327 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1297#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond201#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond253#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond767#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond202#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond768#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond254#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1298#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond769#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1328 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1299#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond255#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1329 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond203#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1330 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1300#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond256#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1331 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1301#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond257#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1332 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1302#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond258#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1333 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1334 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond770#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1303#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1335 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond204#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond259#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond771#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1336 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1304#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1305#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1337 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1306#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond772#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1307#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1338 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond773#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1308#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1339 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1309#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond205#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1340 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1341 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond774#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1342 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1343 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1310#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1344 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1311#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond775#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1345 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond262#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1312#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1346 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1313#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1347 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1314#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1348 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1315#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1349 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1316#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1350 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond777#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond263#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond778#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond264#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1317#2 ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1318#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1351 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1319#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1352 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1320#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond779#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1353 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond207#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1321#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1354 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond265#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1322#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond781#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond266#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond782#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1355 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond267#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1323#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond783#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1356 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1324#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond784#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1357 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond785#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1358 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1359 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond268#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1325#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1360 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1326#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond786#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond269#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1361 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1327#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond787#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1362 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1328#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond788#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1363 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1329#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond270#4 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond789#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1330#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1364 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond271#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond790#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1331#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond791#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1332#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1365 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond792#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1366 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1333#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1334#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1367 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1335#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1368 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond793#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1336#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1369 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1337#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1370 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond208#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1338#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1339#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond794#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond209#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1371 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1340#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1372 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1373 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond272#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1341#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1374 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond795#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1342#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1343#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1376 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1344#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond273#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1377 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond274#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1345#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1378 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1346#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond796#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond211#4 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond797#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1380 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1347#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond212#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1381 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1348#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond213#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1382 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond214#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond215#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1383 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1349#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond798#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1384 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1350#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond799#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1385 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond217#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1386 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1351#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond218#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond275#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 5 ..-.: &pool->lock#4 FD: 1 BD: 1 ..-.: &pool->wait FD: 169 BD: 1 +.+.: 
(wq_completion)bond1387 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1388 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1352#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1389 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1353#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond802#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1390 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1354#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond803#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1391 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1355#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 156 BD: 8 +.+.: &sb->s_type->i_mutex_key#8/4 ->&rq->__lock ->mapping.invalidate_lock FD: 93 BD: 117 +.+.: &ei->i_data_sem/1 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&rq->__lock ->&ret->b_state_lock ->&ei->i_raw_lock ->&ei->i_es_lock ->tk_core.seq.seqcount ->batched_entropy_u32.lock ->&ei->i_prealloc_lock ->&sb->s_type->i_lock_key#22 ->&wb->list_lock ->&journal->j_wait_updates ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond1392 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1356#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond805#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1393 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1357#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond806#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1394 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1358#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond807#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1395 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1359#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond808#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1396 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1360#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond809#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1397 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1361#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond810#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1398 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1362#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond811#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1399 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1363#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond812#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1400 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1364#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond813#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1401 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond814#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1366#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond815#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1403 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1367#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond816#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1404 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1368#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond817#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1405 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1369#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond818#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1370#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond819#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1407 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1371#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1408 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1372#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond820#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1409 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1373#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond821#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1410 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1374#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond822#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1411 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1375 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond823#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: 
(wq_completion)bond1412 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1376#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond824#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1413 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1377#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1414 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1378#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond825#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1415 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1416 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1380#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond826#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1417 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1381#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond827#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1418 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1382#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond828#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1419 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1384#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond829#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1420 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1385#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond830#3 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1421 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond831#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1422 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1387#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond832#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1423 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1388#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond833#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 169 BD: 1 +.+.: (wq_completion)bond1424 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1389#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond834#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1426 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1391#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond836#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1427 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond837#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1428 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1393#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond838#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond839#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1429 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1394#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond840#3 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1395#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond841#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1431 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1396#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1432 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1397#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond842#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1433 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1398#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1399#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond843#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1435 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1400#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1436 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond844#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1401#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1437 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1402 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond845#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1438 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1439 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond846#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1403#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1441 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1442 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1404#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1405#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1443 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1406 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond848#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1444 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1407#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond849#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1445 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1409#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1447 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1410#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond851#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond852#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1412#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1450 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond853#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1413#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1451 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond854#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1414#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1452 
->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond855#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 169 BD: 1 +.+.: (wq_completion)bond1415#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1416#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1453 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond856#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1417#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1454 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond857#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1455 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1419#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1456 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1420#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1457 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1421#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1458 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond858#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1459 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond859#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1422#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond860#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1460 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 169 BD: 1 +.+.: (wq_completion)bond1461 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) all lock chains: irq_context: 0 &obj_hash[i].lock irq_context: 0 &obj_hash[i].lock pool_lock irq_context: 0 
cgroup_mutex irq_context: 0 (console_sem).lock irq_context: 0 cpu_hotplug_lock irq_context: 0 cpu_hotplug_lock jump_label_mutex irq_context: 0 cpu_hotplug_lock static_call_mutex irq_context: 0 cpu_hotplug_lock static_call_mutex text_mutex irq_context: 0 console_mutex irq_context: 0 console_mutex syslog_lock irq_context: 0 console_mutex (console_sem).lock irq_context: 0 console_mutex console_lock console_srcu console_owner_lock irq_context: 0 console_mutex console_lock console_srcu console_owner irq_context: 0 console_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 console_lock console_srcu console_owner_lock irq_context: 0 console_lock console_srcu console_owner irq_context: 0 console_lock console_srcu console_owner console_owner_lock irq_context: 0 input_pool.lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 clocksource_mutex irq_context: 0 clocksource_mutex watchdog_lock irq_context: 0 cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 resource_lock irq_context: 0 cache_disable_lock irq_context: 0 pgd_lock irq_context: 0 init_mm.page_table_lock irq_context: 0 init_mm.page_table_lock pgd_lock irq_context: 0 early_pfn_lock irq_context: 0 acpi_ioapic_lock irq_context: 0 acpi_ioapic_lock ioapic_lock irq_context: 0 acpi_ioapic_lock (console_sem).lock irq_context: 0 acpi_ioapic_lock console_lock console_srcu console_owner_lock irq_context: 0 acpi_ioapic_lock console_lock console_srcu console_owner irq_context: 0 acpi_ioapic_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 syscore_ops_lock irq_context: 0 map_entries_lock irq_context: 0 devtree_lock irq_context: 0 pcpu_lock irq_context: 0 param_lock irq_context: 0 base_crng.lock irq_context: 0 crng_init_wait.lock irq_context: 0 zonelist_update_seq irq_context: 0 zonelist_update_seq zonelist_update_seq.seqcount irq_context: 0 dmar_global_lock irq_context: 0 &zone->lock irq_context: 0 &zone->lock &____s->seqcount irq_context: 0 &pcp->lock &zone->lock irq_context: 0 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &____s->seqcount irq_context: 0 pool_lock#2 irq_context: 0 pcpu_alloc_mutex irq_context: 0 pcpu_alloc_mutex pcpu_lock irq_context: 0 &n->list_lock irq_context: 0 &c->lock irq_context: 0 slab_mutex irq_context: 0 slab_mutex pool_lock#2 irq_context: 0 slab_mutex &c->lock irq_context: 0 slab_mutex &n->list_lock irq_context: 0 slab_mutex pcpu_alloc_mutex irq_context: 0 slab_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 batched_entropy_u64.lock irq_context: 0 batched_entropy_u64.lock crngs.lock irq_context: 0 batched_entropy_u64.lock crngs.lock base_crng.lock irq_context: 0 espfix_init_mutex irq_context: 0 espfix_init_mutex &pcp->lock &zone->lock irq_context: 0 espfix_init_mutex &zone->lock irq_context: 0 espfix_init_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 espfix_init_mutex &____s->seqcount irq_context: 0 espfix_init_mutex pool_lock#2 irq_context: 0 percpu_counters_lock irq_context: 0 &mm->page_table_lock irq_context: 0 ptlock_ptr(page) irq_context: 0 ptlock_ptr(page)#2 irq_context: 0 trace_types_lock irq_context: 0 panic_notifier_list.lock irq_context: 0 die_chain.lock irq_context: 0 trace_event_sem irq_context: 0 batched_entropy_u32.lock irq_context: 0 batched_entropy_u32.lock crngs.lock irq_context: 0 &rq->__lock irq_context: 0 &rq->__lock rcu_read_lock &cfs_b->lock irq_context: 0 init_task.pi_lock irq_context: 0 init_task.pi_lock &rq->__lock irq_context: 0 init_task.vtime_seqcount irq_context: 0 slab_mutex &pcp->lock 
&zone->lock irq_context: 0 slab_mutex &zone->lock irq_context: 0 slab_mutex &____s->seqcount irq_context: 0 wq_pool_mutex irq_context: 0 wq_pool_mutex &pcp->lock &zone->lock irq_context: 0 wq_pool_mutex &zone->lock irq_context: 0 wq_pool_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 wq_pool_mutex &____s->seqcount irq_context: 0 wq_pool_mutex pool_lock#2 irq_context: 0 wq_pool_mutex &c->lock irq_context: 0 &wq->mutex irq_context: 0 &wq->mutex &pool->lock irq_context: 0 wq_pool_mutex &wq->mutex irq_context: 0 cpu_hotplug_lock wq_pool_mutex irq_context: 0 cpu_hotplug_lock wq_pool_mutex pool_lock#2 irq_context: 0 cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pcp->lock &zone->lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &zone->lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cpu_hotplug_lock wq_pool_mutex &____s->seqcount irq_context: 0 cpu_hotplug_lock wq_pool_mutex &c->lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &wq->mutex irq_context: 0 cpu_hotplug_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 wq_pool_mutex &wq->mutex &pool->lock irq_context: 0 shrinker_rwsem irq_context: 0 rcu_node_0 irq_context: 0 rcu_state.barrier_lock irq_context: 0 rcu_state.barrier_lock rcu_node_0 irq_context: 0 &rnp->exp_poll_lock irq_context: 0 &rnp->exp_poll_lock rcu_read_lock &pool->lock irq_context: 0 &rnp->exp_poll_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 slab_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 trace_event_sem trace_event_ida.xa_lock irq_context: 0 trace_event_sem trace_event_ida.xa_lock &pcp->lock &zone->lock irq_context: 0 trace_event_sem trace_event_ida.xa_lock &zone->lock irq_context: 0 trace_event_sem trace_event_ida.xa_lock &____s->seqcount irq_context: 0 trace_event_sem trace_event_ida.xa_lock pool_lock#2 irq_context: 0 trace_event_sem trace_event_ida.xa_lock &c->lock irq_context: 0 trigger_cmd_mutex irq_context: 0 rcu_read_lock pool_lock#2 irq_context: 0 i8259A_lock irq_context: 0 irq_domain_mutex irq_context: 0 free_vmap_area_lock irq_context: 0 vmap_area_lock irq_context: 0 &irq_desc_lock_class irq_context: 0 vmap_purge_lock irq_context: 0 vmap_purge_lock purge_vmap_area_lock irq_context: 0 cpa_lock irq_context: 0 cpa_lock pgd_lock irq_context: 0 timekeeper_lock irq_context: 0 timekeeper_lock tk_core.seq.seqcount irq_context: 0 timekeeper_lock tk_core.seq.seqcount &obj_hash[i].lock irq_context: 0 tk_core.seq.seqcount irq_context: 0 &base->lock irq_context: 0 &base->lock &obj_hash[i].lock irq_context: 0 batched_entropy_u32.lock crngs.lock base_crng.lock irq_context: 0 rcu_read_lock &pool->lock/1 irq_context: 0 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 pmus_lock irq_context: 0 pmus_lock pcpu_alloc_mutex irq_context: 0 pmus_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 pmus_lock &obj_hash[i].lock irq_context: 0 pmus_lock pool_lock#2 irq_context: 0 &swhash->hlist_mutex irq_context: 0 pmus_lock &cpuctx_mutex irq_context: 0 pmus_lock &obj_hash[i].lock pool_lock irq_context: 0 tty_ldiscs_lock irq_context: 0 console_lock irq_context: 0 console_lock resource_lock irq_context: 0 console_lock pool_lock#2 irq_context: 0 console_lock &obj_hash[i].lock irq_context: 0 console_lock &pcp->lock &zone->lock irq_context: 0 console_lock &zone->lock irq_context: 0 console_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 console_lock &____s->seqcount irq_context: 0 console_lock &c->lock irq_context: 0 
console_lock kbd_event_lock irq_context: 0 console_lock kbd_event_lock led_lock irq_context: 0 console_lock vga_lock irq_context: 0 console_lock (console_sem).lock irq_context: 0 console_lock console_owner_lock irq_context: 0 console_mutex &port_lock_key irq_context: 0 console_mutex console_lock irq_context: 0 console_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 console_mutex console_srcu_srcu_usage.lock irq_context: 0 console_mutex console_srcu_srcu_usage.lock &obj_hash[i].lock irq_context: 0 console_mutex &ACCESS_PRIVATE(sdp, lock) irq_context: 0 console_mutex console_srcu irq_context: 0 console_lock console_srcu console_owner &port_lock_key irq_context: 0 init_task.alloc_lock irq_context: 0 acpi_ioremap_lock irq_context: 0 acpi_ioremap_lock pool_lock#2 irq_context: 0 acpi_ioremap_lock resource_lock irq_context: 0 acpi_ioremap_lock memtype_lock irq_context: 0 acpi_ioremap_lock free_vmap_area_lock irq_context: 0 acpi_ioremap_lock vmap_area_lock irq_context: 0 semaphore->lock irq_context: 0 *(&acpi_gbl_reference_count_lock) irq_context: 0 clockevents_lock irq_context: 0 clockevents_lock tk_core.seq.seqcount irq_context: 0 clockevents_lock tick_broadcast_lock irq_context: 0 clockevents_lock i8253_lock irq_context: 0 &desc->request_mutex irq_context: 0 &desc->request_mutex &irq_desc_lock_class irq_context: 0 &desc->request_mutex &irq_desc_lock_class i8259A_lock irq_context: 0 ioapic_lock irq_context: 0 ioapic_mutex irq_context: 0 ioapic_mutex &domain->mutex irq_context: 0 ioapic_mutex &domain->mutex pool_lock#2 irq_context: 0 ioapic_mutex &domain->mutex vector_lock irq_context: 0 ioapic_mutex &domain->mutex &irq_desc_lock_class irq_context: 0 ioapic_mutex &domain->mutex i8259A_lock irq_context: 0 ioapic_mutex &domain->mutex &c->lock irq_context: 0 ioapic_mutex &domain->mutex &pcp->lock &zone->lock irq_context: 0 ioapic_mutex &domain->mutex &zone->lock irq_context: 0 ioapic_mutex &domain->mutex &____s->seqcount irq_context: 0 vector_lock irq_context: 0 sysctl_lock irq_context: 0 tomoyo_policy_lock irq_context: 0 tomoyo_policy_lock pool_lock#2 irq_context: 0 aa_secids.xa_lock irq_context: 0 aa_secids.xa_lock pool_lock#2 irq_context: 0 aa_buffers_lock irq_context: hardirq jiffies_lock irq_context: hardirq jiffies_lock jiffies_seq.seqcount irq_context: hardirq hrtimer_bases.lock irq_context: hardirq hrtimer_bases.lock tk_core.seq.seqcount irq_context: hardirq log_wait.lock irq_context: 0 pernet_ops_rwsem irq_context: 0 pernet_ops_rwsem stack_depot_init_mutex irq_context: 0 pernet_ops_rwsem crngs.lock irq_context: 0 pernet_ops_rwsem net_rwsem irq_context: 0 pernet_ops_rwsem proc_inum_ida.xa_lock irq_context: 0 rtnl_mutex irq_context: 0 rtnl_mutex &c->lock irq_context: 0 rtnl_mutex &____s->seqcount irq_context: 0 rtnl_mutex pool_lock#2 irq_context: softirq drivers/char/random.c:1010 irq_context: softirq drivers/char/random.c:1010 input_pool.lock irq_context: 0 lock irq_context: 0 lock kernfs_idr_lock irq_context: 0 lock kernfs_idr_lock pool_lock#2 irq_context: 0 &root->kernfs_rwsem irq_context: 0 file_systems_lock irq_context: 0 &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_lock irq_context: 0 &type->s_umount_key/1 irq_context: 0 &type->s_umount_key/1 pool_lock#2 irq_context: 0 &type->s_umount_key/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key/1 shrinker_rwsem irq_context: 0 &type->s_umount_key/1 shrinker_rwsem pool_lock#2 irq_context: 0 &type->s_umount_key/1 list_lrus_mutex 
irq_context: 0 &type->s_umount_key/1 sb_lock irq_context: 0 &type->s_umount_key/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key/1 &obj_hash[i].lock irq_context: 0 &type->s_umount_key/1 percpu_counters_lock irq_context: 0 &type->s_umount_key/1 crngs.lock irq_context: 0 &type->s_umount_key/1 &sbinfo->stat_lock irq_context: 0 &type->s_umount_key/1 &____s->seqcount irq_context: 0 &type->s_umount_key/1 &c->lock irq_context: 0 &type->s_umount_key/1 &sb->s_type->i_lock_key irq_context: 0 &type->s_umount_key/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key/1 batched_entropy_u32.lock irq_context: 0 &type->s_umount_key/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key/1 &sb->s_type->i_lock_key &dentry->d_lock irq_context: 0 &type->s_umount_key/1 &dentry->d_lock irq_context: 0 mnt_id_ida.xa_lock irq_context: 0 &dentry->d_lock irq_context: 0 mount_lock irq_context: 0 mount_lock mount_lock.seqcount irq_context: 0 rcu_read_lock &dentry->d_lock irq_context: 0 &type->s_umount_key#2/1 irq_context: 0 &type->s_umount_key#2/1 pool_lock#2 irq_context: 0 &type->s_umount_key#2/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#2/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#2/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#2/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#2/1 sb_lock irq_context: 0 &type->s_umount_key#2/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#2/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#2/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#2/1 &____s->seqcount irq_context: 0 &type->s_umount_key#2/1 &c->lock irq_context: 0 &type->s_umount_key#2/1 &sb->s_type->i_lock_key#2 irq_context: 0 &type->s_umount_key#2/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#2/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#2/1 &sb->s_type->i_lock_key#2 &dentry->d_lock irq_context: 0 &type->s_umount_key#2/1 &dentry->d_lock irq_context: 0 ucounts_lock irq_context: 0 proc_inum_ida.xa_lock irq_context: 0 init_fs.lock irq_context: 0 init_fs.lock init_fs.seq.seqcount irq_context: 0 &type->s_umount_key#3/1 irq_context: 0 &type->s_umount_key#3/1 pool_lock#2 irq_context: 0 &type->s_umount_key#3/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#3/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#3/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#3/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#3/1 sb_lock irq_context: 0 &type->s_umount_key#3/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#3/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#3/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#3/1 &____s->seqcount irq_context: 0 &type->s_umount_key#3/1 &c->lock irq_context: 0 &type->s_umount_key#3/1 &sb->s_type->i_lock_key#3 irq_context: 0 &type->s_umount_key#3/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#3/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#3/1 &sb->s_type->i_lock_key#3 &dentry->d_lock irq_context: 0 &type->s_umount_key#3/1 &dentry->d_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex cpuhp_state-down irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex cpuhp_state-up irq_context: 0 proc_subdir_lock irq_context: 0 proc_subdir_lock irq_context: 0 pernet_ops_rwsem pool_lock#2 irq_context: 0 pernet_ops_rwsem 
proc_subdir_lock irq_context: 0 pernet_ops_rwsem proc_subdir_lock irq_context: 0 &type->s_umount_key#4/1 irq_context: 0 &type->s_umount_key#4/1 pool_lock#2 irq_context: 0 &type->s_umount_key#4/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#4/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#4/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#4/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#4/1 &c->lock irq_context: 0 &type->s_umount_key#4/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#4/1 &zone->lock irq_context: 0 &type->s_umount_key#4/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#4/1 &____s->seqcount irq_context: 0 &type->s_umount_key#4/1 sb_lock irq_context: 0 &type->s_umount_key#4/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#4/1 &sb->s_type->i_lock_key#4 irq_context: 0 &type->s_umount_key#4/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#4/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#4/1 &sb->s_type->i_lock_key#4 &dentry->d_lock irq_context: 0 &type->s_umount_key#4/1 &dentry->d_lock irq_context: 0 cgroup_mutex pcpu_alloc_mutex irq_context: 0 cgroup_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 cgroup_mutex pool_lock#2 irq_context: 0 cgroup_mutex lock irq_context: 0 cgroup_mutex lock kernfs_idr_lock irq_context: 0 cgroup_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 cgroup_mutex &root->kernfs_rwsem irq_context: 0 cgroup_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cgroup_mutex &obj_hash[i].lock irq_context: 0 cgroup_mutex cgroup_file_kn_lock irq_context: 0 cgroup_mutex css_set_lock irq_context: 0 lock cgroup_idr_lock irq_context: 0 lock cgroup_idr_lock pool_lock#2 irq_context: 0 cpuset_rwsem irq_context: 0 cpuset_rwsem cpuset_rwsem.rss.gp_wait.lock irq_context: 0 cpuset_rwsem rcu_node_0 irq_context: 0 cpuset_rwsem callback_lock irq_context: 0 cpuset_rwsem.waiters.lock irq_context: 0 cpuset_rwsem.rss.gp_wait.lock irq_context: 0 cpuset_rwsem.rss.gp_wait.lock &obj_hash[i].lock irq_context: 0 cgroup_mutex &c->lock irq_context: 0 cgroup_mutex &pcp->lock &zone->lock irq_context: 0 cgroup_mutex &zone->lock irq_context: 0 cgroup_mutex &____s->seqcount irq_context: 0 lock cgroup_idr_lock &c->lock irq_context: 0 lock cgroup_idr_lock &pcp->lock &zone->lock irq_context: 0 lock cgroup_idr_lock &zone->lock irq_context: 0 lock cgroup_idr_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 lock cgroup_idr_lock &____s->seqcount irq_context: 0 cgroup_mutex blkcg_pol_mutex irq_context: 0 cgroup_mutex blkcg_pol_mutex pcpu_alloc_mutex irq_context: 0 cgroup_mutex blkcg_pol_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 cgroup_mutex lock cgroup_idr_lock irq_context: 0 cgroup_mutex lock cgroup_idr_lock pool_lock#2 irq_context: 0 cgroup_mutex &n->list_lock irq_context: 0 cgroup_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cgroup_mutex percpu_counters_lock irq_context: 0 cgroup_mutex shrinker_rwsem irq_context: 0 cgroup_mutex shrinker_rwsem &c->lock irq_context: 0 cgroup_mutex shrinker_rwsem &n->list_lock irq_context: 0 cgroup_mutex shrinker_rwsem &pcp->lock &zone->lock irq_context: 0 cgroup_mutex shrinker_rwsem &zone->lock irq_context: 0 cgroup_mutex shrinker_rwsem &____s->seqcount irq_context: 0 cgroup_mutex shrinker_rwsem pool_lock#2 irq_context: 0 cgroup_mutex &base->lock irq_context: 0 cgroup_mutex &base->lock &obj_hash[i].lock irq_context: 0 cgroup_mutex batched_entropy_u8.lock irq_context: 0 cgroup_mutex batched_entropy_u8.lock 
crngs.lock irq_context: 0 cgroup_mutex &pgdat->memcg_lru.lock irq_context: 0 cgroup_mutex devcgroup_mutex irq_context: 0 cgroup_mutex cpu_hotplug_lock irq_context: 0 cgroup_mutex cpu_hotplug_lock freezer_mutex irq_context: 0 &pool->lock#2 irq_context: 0 spec_ctrl_mutex irq_context: 0 spec_ctrl_mutex cpu_hotplug_lock irq_context: 0 rcu_state.exp_mutex rcu_node_0 irq_context: 0 rcu_state.exp_mutex rcu_state.exp_wake_mutex irq_context: 0 rcu_state.exp_mutex rcu_state.exp_wake_mutex rcu_node_0 irq_context: 0 rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_lock irq_context: 0 rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_wq[0] irq_context: 0 rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_wq[1] irq_context: 0 init_sighand.siglock irq_context: 0 init_files.file_lock irq_context: 0 lock pidmap_lock irq_context: 0 lock pidmap_lock pool_lock#2 irq_context: 0 pidmap_lock irq_context: 0 cgroup_threadgroup_rwsem irq_context: 0 cgroup_threadgroup_rwsem css_set_lock irq_context: 0 cgroup_threadgroup_rwsem &p->pi_lock irq_context: 0 cgroup_threadgroup_rwsem &p->pi_lock &rq->__lock irq_context: 0 cgroup_threadgroup_rwsem tk_core.seq.seqcount irq_context: 0 cgroup_threadgroup_rwsem tasklist_lock irq_context: 0 cgroup_threadgroup_rwsem tasklist_lock init_task.pi_lock irq_context: 0 cgroup_threadgroup_rwsem tasklist_lock init_sighand.siglock irq_context: 0 cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock irq_context: 0 cgroup_threadgroup_rwsem css_set_lock &p->pi_lock irq_context: 0 cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock irq_context: 0 &p->pi_lock irq_context: 0 &p->pi_lock &rq->__lock irq_context: 0 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: 0 &p->pi_lock &rq->__lock &base->lock irq_context: 0 &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock &p->pi_lock irq_context: 0 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 (kthreadd_done).wait.lock irq_context: 0 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sighand->siglock irq_context: 0 &p->alloc_lock irq_context: 0 &p->alloc_lock &____s->seqcount#2 irq_context: 0 fs_reclaim irq_context: 0 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 wq_pool_mutex &pool->lock/1 irq_context: 0 wq_pool_mutex fs_reclaim irq_context: 0 wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 wq_pool_mutex rcu_read_lock pool_lock#2 irq_context: 0 wq_pool_mutex &obj_hash[i].lock irq_context: 0 wq_pool_mutex kthread_create_lock irq_context: 0 wq_pool_mutex &p->pi_lock irq_context: 0 wq_pool_mutex &p->pi_lock &rq->__lock irq_context: 0 wq_pool_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 wq_pool_mutex &rq->__lock irq_context: 0 kthread_create_lock irq_context: 0 cgroup_threadgroup_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_threadgroup_rwsem tasklist_lock &p->pi_lock irq_context: 0 cgroup_threadgroup_rwsem tasklist_lock &sighand->siglock irq_context: 0 wq_pool_mutex &x->wait irq_context: 0 wq_pool_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpuset_rwsem irq_context: 0 cpuset_rwsem &p->pi_lock irq_context: 0 cpuset_rwsem &p->pi_lock &rq->__lock irq_context: 0 cpuset_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &x->wait irq_context: 0 &x->wait &p->pi_lock 
irq_context: 0 &x->wait &p->pi_lock &rq->__lock irq_context: 0 &x->wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 wq_pool_attach_mutex irq_context: 0 wq_mayday_lock irq_context: 0 &xa->xa_lock irq_context: 0 &pool->lock irq_context: 0 &pool->lock &p->pi_lock irq_context: 0 &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (&pool->mayday_timer) irq_context: 0 &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rnp->exp_poll_wq) irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rnp->exp_poll_wq) &rnp->exp_poll_lock irq_context: 0 &pool->lock/1 irq_context: 0 &pool->lock/1 &p->pi_lock irq_context: 0 &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (&wq_watchdog_timer) irq_context: 0 &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) allocation_wait.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: hardirq allocation_wait.lock irq_context: hardirq allocation_wait.lock &p->pi_lock irq_context: hardirq allocation_wait.lock &p->pi_lock &rq->__lock irq_context: hardirq allocation_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 batched_entropy_u8.lock irq_context: 0 kfence_freelist_lock irq_context: 0 rcu_tasks.cbs_gbl_lock irq_context: 0 rcu_tasks.cbs_gbl_lock (console_sem).lock irq_context: 0 rcu_tasks.cbs_gbl_lock console_lock console_srcu console_owner_lock irq_context: 0 rcu_tasks.cbs_gbl_lock console_lock console_srcu console_owner irq_context: 0 rcu_tasks.cbs_gbl_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 rcu_tasks.cbs_gbl_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 rcu_tasks.cbs_gbl_lock rcu_tasks__percpu.cbs_pcpu_lock irq_context: 0 rcu_tasks.cbs_gbl_lock rcu_tasks__percpu.cbs_pcpu_lock &obj_hash[i].lock irq_context: 0 rcu_tasks.cbs_gbl_lock &ACCESS_PRIVATE(rtpcp, lock) irq_context: 0 rcu_tasks.cbs_gbl_lock &ACCESS_PRIVATE(rtpcp, lock) &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) &base->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) &base->lock &obj_hash[i].lock irq_context: 0 rcu_tasks_trace.cbs_gbl_lock irq_context: 0 rcu_tasks_trace.cbs_gbl_lock rcu_tasks_trace__percpu.cbs_pcpu_lock irq_context: 0 
rcu_tasks_trace.cbs_gbl_lock rcu_tasks_trace__percpu.cbs_pcpu_lock &obj_hash[i].lock irq_context: 0 rcu_tasks_trace.cbs_gbl_lock rcu_tasks_trace__percpu.cbs_pcpu_lock &obj_hash[i].lock pool_lock irq_context: 0 rcu_tasks_trace.cbs_gbl_lock &ACCESS_PRIVATE(rtpcp, lock) irq_context: 0 rcu_tasks_trace.cbs_gbl_lock &ACCESS_PRIVATE(rtpcp, lock) &obj_hash[i].lock irq_context: 0 rcu_tasks.tasks_gp_mutex irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_tasks.cbs_gbl_lock irq_context: 0 rcu_tasks.tasks_gp_mutex &rq->__lock irq_context: 0 rcu_tasks.tasks_gp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: hardirq rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &x->wait#2 irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_tasks__percpu.cbs_pcpu_lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex rcu_node_0 irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_wq[2] irq_context: 0 rcu_tasks.tasks_gp_mutex &obj_hash[i].lock irq_context: 0 rcu_tasks.tasks_gp_mutex &base->lock irq_context: 0 rcu_tasks.tasks_gp_mutex &base->lock &obj_hash[i].lock irq_context: 0 rcu_tasks.tasks_gp_mutex tasks_rcu_exit_srcu_srcu_usage.lock irq_context: 0 rcu_tasks.tasks_gp_mutex tasks_rcu_exit_srcu_srcu_usage.lock &obj_hash[i].lock irq_context: 0 rcu_tasks.tasks_gp_mutex &ACCESS_PRIVATE(sdp, lock) irq_context: 0 rcu_tasks.tasks_gp_mutex tasks_rcu_exit_srcu irq_context: 0 rcu_tasks.tasks_gp_mutex tasks_rcu_exit_srcu_srcu_usage.lock &ACCESS_PRIVATE(sdp, lock) irq_context: 0 rcu_tasks.tasks_gp_mutex tasks_rcu_exit_srcu_srcu_usage.lock rcu_read_lock &pool->lock irq_context: 0 rcu_tasks.tasks_gp_mutex tasks_rcu_exit_srcu_srcu_usage.lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rcu_tasks.tasks_gp_mutex tasks_rcu_exit_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rcu_tasks.tasks_gp_mutex tasks_rcu_exit_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rcu_tasks.tasks_gp_mutex tasks_rcu_exit_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex tasks_rcu_exit_srcu_srcu_usage.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) tasks_rcu_exit_srcu_srcu_usage.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &base->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rcu_tasks.tasks_gp_mutex &x->wait#3 irq_context: 0 
rcu_tasks_trace.tasks_gp_mutex irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_tasks_trace.cbs_gbl_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &rq->__lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (null) irq_context: 0 (null) tk_core.seq.seqcount irq_context: softirq &(&ssp->srcu_sup->work)->timer irq_context: softirq &(&ssp->srcu_sup->work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&ssp->srcu_sup->work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&ssp->srcu_sup->work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&ssp->srcu_sup->work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&ssp->srcu_sup->work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &ssp->srcu_sup->srcu_cb_mutex irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &ssp->srcu_sup->srcu_cb_mutex tasks_rcu_exit_srcu_srcu_usage.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &ACCESS_PRIVATE(sdp, lock) irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &x->wait#3 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &x->wait#3 &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &x->wait#3 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &x->wait#3 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &rq->__lock irq_context: 0 rcu_tasks.tasks_gp_mutex kernel/rcu/tasks.h:147 irq_context: softirq &(&kfence_timer)->timer irq_context: softirq &(&kfence_timer)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&kfence_timer)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&kfence_timer)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&kfence_timer)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&kfence_timer)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&timer.timer) irq_context: softirq (&timer.timer) &p->pi_lock irq_context: softirq (&timer.timer) &p->pi_lock &rq->__lock irq_context: softirq (&timer.timer) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_tasks.tasks_gp_mutex (&timer.timer) irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_wq[3] irq_context: 0 rcu_tasks.tasks_gp_mutex &x->wait#2 irq_context: 0 rcu_tasks.tasks_gp_mutex &x->wait#2 &p->pi_lock irq_context: 0 rcu_tasks.tasks_gp_mutex &x->wait#2 &p->pi_lock &rq->__lock irq_context: 0 
rcu_tasks.tasks_gp_mutex &x->wait#2 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_tasks_trace__percpu.cbs_pcpu_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex cpu_hotplug_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex cpu_hotplug_lock rcu_tasks_trace__percpu.cbs_pcpu_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex cpu_hotplug_lock &ACCESS_PRIVATE(rtpcp, lock) irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex rcu_node_0 irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_wq[0] irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &x->wait#2 irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &x->wait#2 &p->pi_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &x->wait#2 &p->pi_lock &rq->__lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &x->wait#2 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &nmi_desc[0].lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock fs_reclaim irq_context: 0 cpu_hotplug_lock smpboot_threads_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cpu_hotplug_lock smpboot_threads_lock batched_entropy_u8.lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock kfence_freelist_lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock pool_lock#2 irq_context: 0 cpu_hotplug_lock smpboot_threads_lock kthread_create_lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &p->pi_lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &x->wait irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &rq->__lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &c->lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &n->list_lock irq_context: 0 &rcu_state.gp_wq irq_context: 0 cpu_hotplug_lock smpboot_threads_lock cpuset_rwsem irq_context: 0 cpu_hotplug_lock smpboot_threads_lock cpuset_rwsem &p->pi_lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock cpuset_rwsem &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock cpuset_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &stop_pi_lock irq_context: 0 &stop_pi_lock &rq->__lock irq_context: 0 &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &stopper->lock irq_context: 0 (module_notify_list).rwsem irq_context: 0 ddebug_lock irq_context: 0 &pmus_srcu irq_context: 0 watchdog_mutex irq_context: 0 watchdog_mutex cpu_hotplug_lock irq_context: 0 watchdog_mutex cpu_hotplug_lock &obj_hash[i].lock irq_context: 0 watchdog_mutex cpu_hotplug_lock rcu_read_lock &pool->lock irq_context: 0 watchdog_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 watchdog_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 watchdog_mutex 
cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 watchdog_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 watchdog_mutex cpu_hotplug_lock &x->wait#4 irq_context: 0 watchdog_mutex cpu_hotplug_lock &rq->__lock irq_context: 0 watchdog_mutex cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) &x->wait#5 irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) hrtimer_bases.lock irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) &x->wait#4 irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) &x->wait#4 &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) &x->wait#4 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) &x->wait#4 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &rcu_state.gp_wq &p->pi_lock irq_context: softirq &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: softirq &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_callback cpuset_rwsem.rss.gp_wait.lock irq_context: 0 &newf->file_lock irq_context: 0 init_fs.lock &dentry->d_lock irq_context: 0 &p->vtime.seqcount irq_context: 0 cpu_hotplug_lock mem_hotplug_lock irq_context: 0 cpu_hotplug_lock mem_hotplug_lock mem_hotplug_lock.rss.gp_wait.lock irq_context: 0 cpu_hotplug_lock mem_hotplug_lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 cpu_hotplug_lock mem_hotplug_lock rcu_state.exp_mutex rcu_state.exp_wake_mutex irq_context: 0 cpu_hotplug_lock mem_hotplug_lock rcu_state.exp_mutex rcu_state.exp_wake_mutex rcu_node_0 irq_context: 0 cpu_hotplug_lock mem_hotplug_lock rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_lock irq_context: 0 cpu_hotplug_lock mem_hotplug_lock rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_wq[1] irq_context: 0 cpu_hotplug_lock mem_hotplug_lock.waiters.lock irq_context: 0 cpu_hotplug_lock mem_hotplug_lock.rss.gp_wait.lock irq_context: 0 cpu_hotplug_lock mem_hotplug_lock.rss.gp_wait.lock &obj_hash[i].lock irq_context: 0 cpu_add_remove_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock cpu_hotplug_lock.rss.gp_wait.lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock rcu_state.exp_mutex rcu_state.exp_wake_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock rcu_state.exp_mutex rcu_state.exp_wake_mutex rcu_node_0 irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_wq[2] irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock fs_reclaim irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock pool_lock#2 irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock kthread_create_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock &p->pi_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock &p->pi_lock &rq->__lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock &rq->__lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock &x->wait irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock &obj_hash[i].lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock cpuset_rwsem irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock cpuset_rwsem &p->pi_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock cpuset_rwsem &p->pi_lock &rq->__lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock cpuset_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &swhash->hlist_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock pmus_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock pmus_lock &cpuctx_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock pcp_batch_high_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &xa->xa_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock fs_reclaim irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock pool_lock#2 irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock kthread_create_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &p->pi_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &p->pi_lock &rq->__lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &x->wait irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &rq->__lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &obj_hash[i].lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock wq_pool_attach_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &pool->lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &pool->lock &p->pi_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock pcpu_alloc_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock relay_channels_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock rcu_node_0 irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &c->lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &n->list_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &pcp->lock &zone->lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &zone->lock irq_context: 0 cpu_add_remove_lock 
cpu_hotplug_lock &____s->seqcount irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock text_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock fs_reclaim irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock pool_lock#2 irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock free_vmap_area_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock vmap_area_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock &pcp->lock &zone->lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock &zone->lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock &____s->seqcount irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock init_mm.page_table_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock (console_sem).lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock console_lock console_srcu console_owner_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock console_lock console_srcu console_owner irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock rtc_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock &rq->__lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rq->__lock &rq->__lock/1 irq_context: 0 &rq->__lock/1 irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &x->wait#6 irq_context: 0 &x->wait#6 irq_context: 0 &x->wait#6 &p->pi_lock irq_context: 0 &x->wait#6 &p->pi_lock &rq->__lock irq_context: 0 &x->wait#6 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 cpu_hotplug_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up irq_context: 0 cpu_hotplug_lock cpuhp_state-up smpboot_threads_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up smpboot_threads_lock &p->pi_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up smpboot_threads_lock &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up smpboot_threads_lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up smpboot_threads_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock cpuhp_state-up sparse_irq_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up sparse_irq_lock &irq_desc_lock_class irq_context: 0 cpu_hotplug_lock cpuhp_state-up &swhash->hlist_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up pmus_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up pmus_lock &cpuctx_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up &x->wait#5 irq_context: 0 cpu_hotplug_lock cpuhp_state-up &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up hrtimer_bases.lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 cpu_hotplug_lock cpuhp_state-up hrtimer_bases.lock 
&obj_hash[i].lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex wq_pool_attach_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex wq_pool_attach_mutex &p->pi_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex wq_pool_attach_mutex &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex wq_pool_attach_mutex &x->wait#7 irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex wq_pool_attach_mutex &pool->lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex &wq->mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex &pool->lock/1 irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_node_0 irq_context: 0 cpu_hotplug_lock cpuhp_state-up &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up jump_label_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up jump_label_mutex text_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 cpu_hotplug_lock cpuhp_state-up &rq->__lock &rt_b->rt_runtime_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &rq->__lock &rt_b->rt_runtime_lock &rt_rq->rt_runtime_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &rq->__lock rcu_read_lock &cfs_b->lock irq_context: hardirq &rq->__lock &cfs_rq->removed.lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock.waiters.lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock.rss.gp_wait.lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock.rss.gp_wait.lock &obj_hash[i].lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock.rss.gp_wait.lock &obj_hash[i].lock pool_lock irq_context: 0 cpu_add_remove_lock spec_ctrl_mutex irq_context: 0 cpu_add_remove_lock spec_ctrl_mutex cpu_hotplug_lock irq_context: 0 cpu_add_remove_lock spec_ctrl_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 cpu_add_remove_lock spec_ctrl_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 cpu_add_remove_lock spec_ctrl_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 cpu_add_remove_lock spec_ctrl_mutex (console_sem).lock irq_context: 0 cpu_add_remove_lock spec_ctrl_mutex console_lock console_srcu console_owner_lock irq_context: 0 cpu_add_remove_lock spec_ctrl_mutex console_lock console_srcu console_owner irq_context: 0 cpu_add_remove_lock spec_ctrl_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 cpu_add_remove_lock spec_ctrl_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 cpu_add_remove_lock spec_ctrl_mutex &rq->__lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_wq[3] irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &obj_hash[i].lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &base->lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &base->lock &obj_hash[i].lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock jump_label_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 cpu_add_remove_lock cpuset_hotplug_work irq_context: 0 cpu_hotplug_lock stop_cpus_mutex irq_context: 0 cpu_hotplug_lock stop_cpus_mutex &stopper->lock irq_context: 0 cpu_hotplug_lock stop_cpus_mutex 
&stop_pi_lock irq_context: 0 cpu_hotplug_lock stop_cpus_mutex &stop_pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock stop_cpus_mutex &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock stop_cpus_mutex &rq->__lock irq_context: 0 &x->wait#8 irq_context: 0 cpu_hotplug_lock stop_cpus_mutex &x->wait#8 irq_context: 0 sched_domains_mutex irq_context: 0 sched_domains_mutex fs_reclaim irq_context: 0 sched_domains_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sched_domains_mutex pool_lock#2 irq_context: 0 sched_domains_mutex &obj_hash[i].lock irq_context: 0 sched_domains_mutex pcpu_alloc_mutex irq_context: 0 sched_domains_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 sched_domains_mutex &c->lock irq_context: 0 sched_domains_mutex &n->list_lock irq_context: 0 sched_domains_mutex &pcp->lock &zone->lock irq_context: 0 sched_domains_mutex &zone->lock irq_context: 0 sched_domains_mutex &____s->seqcount irq_context: 0 sched_domains_mutex rcu_read_lock &rq->__lock irq_context: 0 sched_domains_mutex rcu_read_lock &rq->__lock &cp->lock irq_context: 0 sched_domains_mutex rcu_read_lock &rq->__lock &rt_b->rt_runtime_lock irq_context: 0 sched_domains_mutex rcu_read_lock &rq->__lock &rt_b->rt_runtime_lock &rt_rq->rt_runtime_lock irq_context: 0 sched_domains_mutex rcu_read_lock &rq->__lock rcu_read_lock &cfs_b->lock irq_context: 0 sched_domains_mutex pcpu_lock irq_context: 0 slab_mutex fs_reclaim irq_context: 0 slab_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (memory_chain).rwsem irq_context: 0 cpu_hotplug_lock wq_pool_mutex fs_reclaim irq_context: 0 cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#5/1 irq_context: 0 &type->s_umount_key#5/1 fs_reclaim irq_context: 0 &type->s_umount_key#5/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#5/1 pool_lock#2 irq_context: 0 &type->s_umount_key#5/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#5/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#5/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#5/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#5/1 sb_lock irq_context: 0 &type->s_umount_key#5/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#5/1 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#5/1 percpu_counters_lock irq_context: 0 &type->s_umount_key#5/1 crngs.lock irq_context: 0 &type->s_umount_key#5/1 &sbinfo->stat_lock irq_context: 0 &type->s_umount_key#5/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#5/1 &sb->s_type->i_lock_key#5 irq_context: 0 &type->s_umount_key#5/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#5/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#5/1 batched_entropy_u32.lock irq_context: 0 &type->s_umount_key#5/1 &sb->s_type->i_lock_key#5 &dentry->d_lock irq_context: 0 &type->s_umount_key#5/1 &dentry->d_lock irq_context: 0 (setup_done).wait.lock irq_context: 0 namespace_sem irq_context: 0 namespace_sem fs_reclaim irq_context: 0 namespace_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 namespace_sem &pcp->lock &zone->lock irq_context: 0 namespace_sem &zone->lock irq_context: 0 namespace_sem &____s->seqcount irq_context: 0 namespace_sem pool_lock#2 irq_context: 0 namespace_sem &c->lock irq_context: 0 namespace_sem mnt_id_ida.xa_lock irq_context: 0 namespace_sem pcpu_alloc_mutex irq_context: 0 namespace_sem pcpu_alloc_mutex pcpu_lock irq_context: 0 
namespace_sem &dentry->d_lock irq_context: 0 namespace_sem mount_lock irq_context: 0 namespace_sem mount_lock mount_lock.seqcount irq_context: 0 &p->alloc_lock init_fs.lock irq_context: 0 rcu_read_lock &____s->seqcount#3 irq_context: 0 file_systems_lock irq_context: 0 &type->s_umount_key#6 irq_context: 0 &type->s_umount_key#6 fs_reclaim irq_context: 0 &type->s_umount_key#6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#6 pool_lock#2 irq_context: 0 &type->s_umount_key#6 &dentry->d_lock irq_context: 0 &type->s_umount_key#6 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#6 &zone->lock irq_context: 0 &type->s_umount_key#6 &____s->seqcount irq_context: 0 &type->s_umount_key#6 &c->lock irq_context: 0 &type->s_umount_key#6 &lru->node[i].lock irq_context: 0 &type->s_umount_key#6 &sbinfo->stat_lock irq_context: 0 &type->s_umount_key#6 rcu_read_lock &dentry->d_lock irq_context: 0 &type->s_umount_key#6 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key irq_context: 0 &sb->s_type->i_mutex_key namespace_sem irq_context: 0 &sb->s_type->i_mutex_key namespace_sem rcu_read_lock mount_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key namespace_sem fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key namespace_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key namespace_sem pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key namespace_sem rename_lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem rename_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key namespace_sem rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem mount_lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem mount_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem mount_lock mount_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key namespace_sem mount_lock mount_lock.seqcount &new_ns->poll irq_context: 0 &sb->s_type->i_mutex_key namespace_sem mount_lock mount_lock.seqcount &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem mount_lock mount_lock.seqcount rcu_read_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem mount_lock mount_lock.seqcount &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem mount_lock mount_lock.seqcount pool_lock#2 irq_context: 0 rcu_read_lock &sb->s_type->i_lock_key#2 irq_context: 0 rcu_read_lock &____s->seqcount#4 irq_context: 0 &sb->s_type->i_lock_key#5 irq_context: 0 &fs->lock irq_context: 0 &fs->lock &____s->seqcount#3 irq_context: 0 (setup_done).wait.lock &p->pi_lock irq_context: 0 (setup_done).wait.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (setup_done).wait.lock &p->pi_lock &rq->__lock irq_context: 0 (setup_done).wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 req_lock irq_context: softirq rcu_callback &obj_hash[i].lock irq_context: softirq rcu_callback pool_lock#2 irq_context: 0 of_mutex irq_context: 0 of_mutex fs_reclaim irq_context: 0 of_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 of_mutex pool_lock#2 irq_context: 0 of_mutex lock irq_context: 0 of_mutex lock kernfs_idr_lock irq_context: 0 of_mutex &root->kernfs_rwsem irq_context: 0 of_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &x->wait#9 irq_context: 0 &k->list_lock irq_context: 0 bus_type_sem irq_context: 0 &root->kernfs_rwsem irq_context: 0 &dev->power.lock irq_context: 0 dpm_list_mtx irq_context: 0 
uevent_sock_mutex irq_context: 0 running_helpers_waitq.lock irq_context: 0 sysfs_symlink_target_lock irq_context: 0 &k->k_lock irq_context: 0 &dev->mutex &k->list_lock irq_context: 0 &dev->mutex &k->k_lock irq_context: 0 &dev->mutex &dev->power.lock irq_context: 0 subsys mutex irq_context: 0 memory_blocks.xa_lock irq_context: 0 memory_blocks.xa_lock pool_lock#2 irq_context: softirq &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: softirq (&timer.timer) &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq rcu_callback mem_hotplug_lock.rss.gp_wait.lock irq_context: softirq rcu_callback cpu_hotplug_lock.rss.gp_wait.lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex (&timer.timer) irq_context: 0 rcu_tasks_trace.tasks_gp_mutex (console_sem).lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex console_lock console_srcu console_owner_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex console_lock console_srcu console_owner irq_context: 0 rcu_tasks_trace.tasks_gp_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 lock kernfs_idr_lock &c->lock irq_context: 0 lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 lock kernfs_idr_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 lock kernfs_idr_lock &____s->seqcount irq_context: 0 rcu_tasks_trace.tasks_gp_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 subsys mutex#2 irq_context: 0 register_lock irq_context: 0 register_lock proc_subdir_lock irq_context: 0 register_lock fs_reclaim irq_context: 0 register_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 register_lock pool_lock#2 irq_context: 0 register_lock proc_inum_ida.xa_lock irq_context: 0 register_lock proc_subdir_lock irq_context: 0 register_lock &c->lock irq_context: 0 register_lock &pcp->lock &zone->lock irq_context: 0 register_lock &zone->lock irq_context: 0 register_lock &____s->seqcount irq_context: 0 register_lock proc_inum_ida.xa_lock &pcp->lock &zone->lock irq_context: 0 register_lock proc_inum_ida.xa_lock &zone->lock irq_context: 0 register_lock proc_inum_ida.xa_lock &____s->seqcount irq_context: 0 register_lock proc_inum_ida.xa_lock pool_lock#2 irq_context: 0 register_lock proc_inum_ida.xa_lock &c->lock irq_context: 0 register_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock &rq->__lock irq_context: 0 rcu_read_lock &stopper->lock irq_context: 0 rcu_read_lock &stop_pi_lock irq_context: 0 rcu_read_lock &stop_pi_lock &rq->__lock irq_context: 0 rcu_read_lock &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (pm_chain_head).rwsem irq_context: 0 cpufreq_governor_mutex irq_context: 0 rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 rcu_state.exp_mutex &rq->__lock irq_context: 0 rcu_state.exp_mutex 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 rcu_state.exp_mutex rcu_read_lock &stopper->lock irq_context: 0 rcu_state.exp_mutex rcu_read_lock &stop_pi_lock irq_context: 0 rcu_state.exp_mutex rcu_read_lock &stop_pi_lock &rq->__lock irq_context: 0 rcu_state.exp_mutex rcu_read_lock &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pool->lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_node_0 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex rcu_node_0 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[2] &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[2] &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[2] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[3] &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[3] &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[3] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dyn_event_ops_mutex irq_context: 0 binfmt_lock irq_context: 0 pin_fs_lock irq_context: 0 &type->s_umount_key#7/1 irq_context: 0 &type->s_umount_key#7/1 fs_reclaim irq_context: 0 &type->s_umount_key#7/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#7/1 pool_lock#2 irq_context: 0 &type->s_umount_key#7/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#7/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#7/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#7/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#7/1 sb_lock irq_context: 0 &type->s_umount_key#7/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#7/1 &____s->seqcount irq_context: 0 &type->s_umount_key#7/1 &c->lock irq_context: 0 &type->s_umount_key#7/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#7/1 &sb->s_type->i_lock_key#6 irq_context: 0 &type->s_umount_key#7/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#7/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#7/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#7/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#7/1 &sb->s_type->i_lock_key#6 &dentry->d_lock irq_context: 0 &type->s_umount_key#7/1 &dentry->d_lock irq_context: 0 rcu_read_lock mount_lock irq_context: 0 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#2 irq_context: 0 
&sb->s_type->i_mutex_key#2 &sb->s_type->i_lock_key#6 irq_context: 0 &sb->s_type->i_mutex_key#2 rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#2 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#2 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#2 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#2 rcu_read_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#2 &dentry->d_lock &wq irq_context: 0 &sb->s_type->i_mutex_key#2 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#2 &s->s_inode_list_lock irq_context: 0 &sb->s_type->i_mutex_key#2 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#2 &sb->s_type->i_lock_key#6 &dentry->d_lock irq_context: 0 chrdevs_lock irq_context: 0 cb_lock irq_context: 0 cb_lock genl_mutex irq_context: 0 cb_lock genl_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex pool_lock#2 irq_context: 0 &type->s_umount_key#8/1 irq_context: 0 &type->s_umount_key#8/1 fs_reclaim irq_context: 0 &type->s_umount_key#8/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#8/1 pool_lock#2 irq_context: 0 &type->s_umount_key#8/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#8/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#8/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#8/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#8/1 sb_lock irq_context: 0 &type->s_umount_key#8/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#8/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#8/1 &sb->s_type->i_lock_key#7 irq_context: 0 &type->s_umount_key#8/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#8/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#8/1 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 &type->s_umount_key#8/1 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#3 irq_context: 0 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 subsys mutex#3 irq_context: 0 async_lock irq_context: 0 rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex device_links_srcu irq_context: 0 regulator_list_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &dev->power.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) 
&dev->mutex fwnode_link_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex device_links_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &k->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex lock irq_context: 0 rtc_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &x->wait#9 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &dev->devres_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex regulator_nesting_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex regulator_ww_class_mutex regulator_nesting_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex devtree_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex &k->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex bus_type_sem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &c->lock irq_context: 0 
(wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex dpm_list_mtx irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex uevent_sock_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex running_helpers_waitq.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex subsys mutex#4 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex subsys mutex#4 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex pin_fs_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex regulator_list_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex deferred_probe_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex probe_waitqueue.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) async_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) async_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) async_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) async_done.lock irq_context: 0 rcu_read_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#9/1 irq_context: 0 &type->s_umount_key#9/1 fs_reclaim irq_context: 0 &type->s_umount_key#9/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#9/1 pool_lock#2 irq_context: 0 &type->s_umount_key#9/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#9/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#9/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#9/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#9/1 sb_lock irq_context: 0 &type->s_umount_key#9/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#9/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#9/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#9/1 &____s->seqcount irq_context: 0 &type->s_umount_key#9/1 &c->lock irq_context: 0 &type->s_umount_key#9/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#9/1 &sb->s_type->i_lock_key#8 irq_context: 0 &type->s_umount_key#9/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#9/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#9/1 &sb->s_type->i_lock_key#8 &dentry->d_lock irq_context: 0 &type->s_umount_key#9/1 &dentry->d_lock irq_context: 0 pernet_ops_rwsem fs_reclaim irq_context: 0 pernet_ops_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem &____s->seqcount irq_context: 0 pernet_ops_rwsem &c->lock irq_context: 0 pernet_ops_rwsem sysctl_lock irq_context: 0 pack_mutex irq_context: 0 pack_mutex fs_reclaim irq_context: 0 pack_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pack_mutex &pcp->lock &zone->lock irq_context: 0 pack_mutex &zone->lock irq_context: 0 pack_mutex &____s->seqcount irq_context: 0 pack_mutex pool_lock#2 irq_context: 0 pack_mutex free_vmap_area_lock irq_context: 0 pack_mutex vmap_area_lock irq_context: 0 pack_mutex init_mm.page_table_lock irq_context: 0 pack_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pack_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 pack_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pack_mutex vmap_purge_lock irq_context: 0 pack_mutex vmap_purge_lock purge_vmap_area_lock irq_context: 0 pack_mutex cpa_lock irq_context: 0 pack_mutex cpa_lock pgd_lock irq_context: 0 text_mutex irq_context: 0 text_mutex ptlock_ptr(page)#2 irq_context: 0 &fp->aux->used_maps_mutex irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex pcpu_lock irq_context: 0 proto_list_mutex irq_context: 0 targets_mutex irq_context: 0 nl_table_lock irq_context: 0 nl_table_wait.lock irq_context: 0 net_family_lock irq_context: 0 pernet_ops_rwsem net_generic_ids.xa_lock irq_context: 0 pernet_ops_rwsem mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem &sb->s_type->i_lock_key#8 irq_context: 0 pernet_ops_rwsem &dir->lock irq_context: 0 pernet_ops_rwsem &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK k-slock-AF_NETLINK irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock rhashtable_bucket irq_context: 0 pernet_ops_rwsem k-slock-AF_NETLINK irq_context: 0 pernet_ops_rwsem nl_table_lock irq_context: 0 pernet_ops_rwsem nl_table_wait.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex irq_context: 0 rtnl_mutex fs_reclaim irq_context: 0 rtnl_mutex fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &pcp->lock &zone->lock irq_context: 0 rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sparse_irq_lock irq_context: 0 sparse_irq_lock fs_reclaim irq_context: 0 sparse_irq_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sparse_irq_lock pool_lock#2 irq_context: 0 sparse_irq_lock lock irq_context: 0 sparse_irq_lock lock kernfs_idr_lock irq_context: 0 sparse_irq_lock &root->kernfs_rwsem irq_context: 0 sparse_irq_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sparse_irq_lock &c->lock irq_context: 0 sparse_irq_lock &____s->seqcount irq_context: 0 sparse_irq_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 sparse_irq_lock &pcp->lock &zone->lock irq_context: 0 sparse_irq_lock &zone->lock irq_context: 0 &list->lock irq_context: 0 kauditd_wait.lock irq_context: 0 lock#2 irq_context: 0 lock#2 &zone->lock irq_context: 0 pcp_batch_high_lock irq_context: 0 khugepaged_mutex irq_context: 0 gdp_mutex irq_context: 0 gdp_mutex &k->list_lock irq_context: 0 gdp_mutex fs_reclaim irq_context: 0 gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 gdp_mutex pool_lock#2 irq_context: 0 gdp_mutex lock irq_context: 0 gdp_mutex lock kernfs_idr_lock irq_context: 0 gdp_mutex &root->kernfs_rwsem irq_context: 0 gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 subsys mutex#5 irq_context: 0 subsys mutex#5 &k->k_lock irq_context: 0 subsys mutex#6 irq_context: 0 subsys mutex#6 &k->list_lock irq_context: 0 subsys mutex#6 &k->k_lock irq_context: 0 regmap_debugfs_early_lock irq_context: 0 (acpi_reconfig_chain).rwsem irq_context: 0 __i2c_board_lock irq_context: 0 core_lock irq_context: 0 core_lock &k->list_lock irq_context: 0 core_lock &k->k_lock irq_context: 0 cb_lock genl_mutex nl_table_lock irq_context: 0 cb_lock genl_mutex nl_table_wait.lock irq_context: 0 nl_table_lock irq_context: 0 thermal_governor_lock irq_context: 0 thermal_governor_lock thermal_list_lock irq_context: 0 cpuidle_lock irq_context: 0 cpuidle_lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 cpuidle_lock rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cpuidle_lock rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cpuidle_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cpuidle_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cpuidle_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cpuidle_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpuidle_lock rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 cpuidle_lock rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[0] &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[0] &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[0] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpuidle_lock &obj_hash[i].lock irq_context: 0 cpuidle_lock (console_sem).lock irq_context: 0 cpuidle_lock console_lock console_srcu console_owner_lock irq_context: 0 cpuidle_lock console_lock console_srcu 
console_owner irq_context: 0 cpuidle_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 cpuidle_lock console_lock console_srcu console_owner console_owner_lock irq_context: softirq &(&kfence_timer)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_lock_key#8 irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 &dir->lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 k-sk_lock-AF_QIPCRTR irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &rcu_state.expedited_wq irq_context: hardirq &rcu_state.expedited_wq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[1] &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[1] &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[1] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 k-sk_lock-AF_QIPCRTR k-slock-AF_QIPCRTR irq_context: 0 k-slock-AF_QIPCRTR irq_context: 0 k-sk_lock-AF_QIPCRTR fs_reclaim irq_context: 0 k-sk_lock-AF_QIPCRTR fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 k-sk_lock-AF_QIPCRTR qrtr_ports.xa_lock irq_context: 0 k-sk_lock-AF_QIPCRTR batched_entropy_u8.lock irq_context: 0 k-sk_lock-AF_QIPCRTR batched_entropy_u8.lock crngs.lock irq_context: 0 k-sk_lock-AF_QIPCRTR kfence_freelist_lock irq_context: 0 k-sk_lock-AF_QIPCRTR pool_lock#2 irq_context: 0 k-sk_lock-AF_QIPCRTR qrtr_node_lock irq_context: 0 k-sk_lock-AF_QIPCRTR &obj_hash[i].lock irq_context: 0 k-sk_lock-AF_QIPCRTR &meta->lock irq_context: 0 freezer_lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex irq_context: 0 audit_backlog_wait.lock irq_context: 0 &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 crngs.lock irq_context: 0 (crypto_chain).rwsem irq_context: 0 iova_cache_mutex irq_context: 0 iova_cache_mutex cpu_hotplug_lock irq_context: 0 iova_cache_mutex cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 iova_cache_mutex slab_mutex irq_context: 0 iova_cache_mutex slab_mutex fs_reclaim irq_context: 0 iova_cache_mutex slab_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 iova_cache_mutex slab_mutex pool_lock#2 irq_context: 0 iova_cache_mutex slab_mutex &c->lock irq_context: 0 iova_cache_mutex slab_mutex &n->list_lock 
irq_context: 0 iova_cache_mutex slab_mutex pcpu_alloc_mutex irq_context: 0 iova_cache_mutex slab_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 subsys mutex#7 irq_context: 0 subsys mutex#7 &k->k_lock irq_context: 0 pci_config_lock irq_context: 0 device_links_lock irq_context: 0 subsys mutex#8 irq_context: 0 dev_pm_qos_mtx irq_context: 0 dev_pm_qos_mtx fs_reclaim irq_context: 0 dev_pm_qos_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 dev_pm_qos_mtx pool_lock#2 irq_context: 0 dev_pm_qos_mtx &dev->power.lock irq_context: 0 dev_pm_qos_mtx pm_qos_lock irq_context: 0 dev_pm_qos_sysfs_mtx irq_context: 0 dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 dev_pm_qos_sysfs_mtx fs_reclaim irq_context: 0 dev_pm_qos_sysfs_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 dev_pm_qos_sysfs_mtx pool_lock#2 irq_context: 0 dev_pm_qos_sysfs_mtx lock irq_context: 0 dev_pm_qos_sysfs_mtx lock kernfs_idr_lock irq_context: 0 dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 uidhash_lock irq_context: 0 cpuset_rwsem &rq->__lock irq_context: 0 &rq->__lock rcu_read_lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock wq_pool_mutex (console_sem).lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex console_lock console_srcu console_owner_lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex console_lock console_srcu console_owner irq_context: 0 cpu_hotplug_lock wq_pool_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 cpu_hotplug_lock wq_pool_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &type->s_umount_key#27/1 crypto_alg_sem irq_context: 0 &type->s_umount_key#27/1 quarantine_lock irq_context: 0 &type->s_umount_key#27/1 shrinker_rwsem irq_context: 0 oom_reaper_wait.lock irq_context: 0 subsys mutex#9 irq_context: 0 &pgdat->kcompactd_wait irq_context: 0 slab_mutex rcu_read_lock &pool->lock irq_context: 0 slab_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 slab_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 slab_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 slab_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 slab_mutex &rq->__lock irq_context: 0 (wq_completion)events pcpu_balance_work irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex pcpu_lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex fs_reclaim irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex pool_lock#2 irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex free_vmap_area_lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex vmap_area_lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &zone->lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &____s->seqcount irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex init_mm.page_table_lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &pcp->lock &zone->lock 
&____s->seqcount irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &c->lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &rq->__lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 slab_mutex pcpu_alloc_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 slab_mutex pcpu_alloc_mutex &rq->__lock irq_context: 0 slab_mutex pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex.wait_lock irq_context: 0 (wq_completion)events pcpu_balance_work &p->pi_lock irq_context: 0 (wq_completion)events pcpu_balance_work &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events pcpu_balance_work &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events pcpu_balance_work &rq->__lock irq_context: 0 memory_tier_lock irq_context: 0 memory_tier_lock fs_reclaim irq_context: 0 memory_tier_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 memory_tier_lock pool_lock#2 irq_context: 0 memory_tier_lock &x->wait#9 irq_context: 0 memory_tier_lock &obj_hash[i].lock irq_context: 0 memory_tier_lock &k->list_lock irq_context: 0 memory_tier_lock &pcp->lock &zone->lock irq_context: 0 memory_tier_lock &zone->lock irq_context: 0 memory_tier_lock &____s->seqcount irq_context: 0 memory_tier_lock rcu_read_lock pool_lock#2 irq_context: 0 memory_tier_lock lock irq_context: 0 memory_tier_lock lock kernfs_idr_lock irq_context: 0 memory_tier_lock &root->kernfs_rwsem irq_context: 0 memory_tier_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 memory_tier_lock bus_type_sem irq_context: 0 memory_tier_lock sysfs_symlink_target_lock irq_context: 0 memory_tier_lock &k->k_lock irq_context: 0 memory_tier_lock &c->lock irq_context: 0 memory_tier_lock &root->kernfs_rwsem irq_context: 0 memory_tier_lock &dev->power.lock irq_context: 0 memory_tier_lock dpm_list_mtx irq_context: 0 memory_tier_lock uevent_sock_mutex irq_context: 0 memory_tier_lock running_helpers_waitq.lock irq_context: 0 memory_tier_lock &dev->mutex &k->list_lock irq_context: 0 memory_tier_lock &dev->mutex &k->k_lock irq_context: 0 memory_tier_lock &dev->mutex &dev->power.lock irq_context: 0 memory_tier_lock subsys mutex#10 irq_context: 0 memory_tier_lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 memory_tier_lock rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 memory_tier_lock rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 memory_tier_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 memory_tier_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 memory_tier_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 memory_tier_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 memory_tier_lock rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 memory_tier_lock rcu_state.exp_mutex &rq->__lock irq_context: 0 memory_tier_lock rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: hardirq &rcu_state.expedited_wq &p->pi_lock irq_context: 0 khugepaged_mutex fs_reclaim irq_context: 0 khugepaged_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 khugepaged_mutex pool_lock#2 irq_context: 0 
khugepaged_mutex kthread_create_lock irq_context: 0 khugepaged_mutex &p->pi_lock irq_context: 0 khugepaged_mutex &p->pi_lock &rq->__lock irq_context: 0 khugepaged_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 khugepaged_mutex &x->wait irq_context: 0 khugepaged_mutex &rq->__lock irq_context: 0 khugepaged_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ksm_thread_mutex irq_context: 0 ksm_thread_wait.lock irq_context: 0 khugepaged_mutex &obj_hash[i].lock irq_context: 0 khugepaged_mutex lock#2 irq_context: 0 khugepaged_mutex lock#2 &zone->lock irq_context: 0 khugepaged_mutex pcp_batch_high_lock irq_context: 0 damon_ops_lock irq_context: 0 crypto_alg_sem irq_context: 0 crypto_alg_sem (crypto_chain).rwsem irq_context: 0 cpu_hotplug_lock fs_reclaim irq_context: 0 cpu_hotplug_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cpu_hotplug_lock pool_lock#2 irq_context: 0 cpu_hotplug_lock pcpu_alloc_mutex irq_context: 0 cpu_hotplug_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 cpu_hotplug_lock &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock &wq->mutex irq_context: 0 cpu_hotplug_lock &wq->mutex &pool->lock irq_context: 0 cpu_hotplug_lock kthread_create_lock irq_context: 0 cpu_hotplug_lock &p->pi_lock irq_context: 0 cpu_hotplug_lock &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock &x->wait irq_context: 0 cpu_hotplug_lock &rq->__lock irq_context: 0 cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 lock#3 irq_context: 0 khugepaged_mm_lock irq_context: 0 khugepaged_wait.lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock pool_lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock/1 irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock/1 rcu_read_lock &pool->lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &wq->mutex irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) wq_pool_mutex irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) pool_lock#2 irq_context: 0 bio_slab_lock irq_context: 0 bio_slab_lock fs_reclaim irq_context: 0 bio_slab_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 bio_slab_lock pool_lock#2 irq_context: 0 bio_slab_lock slab_mutex irq_context: 0 bio_slab_lock slab_mutex fs_reclaim irq_context: 0 bio_slab_lock slab_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 bio_slab_lock slab_mutex pool_lock#2 irq_context: 0 bio_slab_lock slab_mutex &c->lock irq_context: 0 bio_slab_lock slab_mutex &n->list_lock irq_context: 0 bio_slab_lock slab_mutex pcpu_alloc_mutex irq_context: 0 bio_slab_lock slab_mutex 
pcpu_alloc_mutex pcpu_lock irq_context: 0 bio_slab_lock bio_slabs.xa_lock irq_context: 0 bio_slab_lock bio_slabs.xa_lock pool_lock#2 irq_context: 0 major_names_lock irq_context: 0 major_names_lock fs_reclaim irq_context: 0 major_names_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 major_names_lock pool_lock#2 irq_context: 0 major_names_lock major_names_spinlock irq_context: 0 lock kernfs_idr_lock &zone->lock irq_context: 0 quarantine_lock irq_context: 0 slab_mutex remove_cache_srcu irq_context: 0 slab_mutex remove_cache_srcu quarantine_lock irq_context: 0 console_lock fs_reclaim irq_context: 0 console_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 console_lock &x->wait#9 irq_context: 0 console_lock &k->list_lock irq_context: 0 console_lock gdp_mutex irq_context: 0 console_lock gdp_mutex &k->list_lock irq_context: 0 console_lock gdp_mutex fs_reclaim irq_context: 0 console_lock gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 console_lock gdp_mutex pool_lock#2 irq_context: 0 console_lock gdp_mutex lock irq_context: 0 console_lock gdp_mutex lock kernfs_idr_lock irq_context: 0 console_lock gdp_mutex &root->kernfs_rwsem irq_context: 0 console_lock gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 console_lock lock irq_context: 0 console_lock lock kernfs_idr_lock irq_context: 0 console_lock &root->kernfs_rwsem irq_context: 0 console_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 console_lock bus_type_sem irq_context: 0 console_lock sysfs_symlink_target_lock irq_context: 0 console_lock &root->kernfs_rwsem irq_context: 0 console_lock &dev->power.lock irq_context: 0 console_lock dpm_list_mtx irq_context: 0 console_lock uevent_sock_mutex irq_context: 0 console_lock running_helpers_waitq.lock irq_context: 0 console_lock subsys mutex#11 irq_context: 0 console_lock subsys mutex#11 &k->k_lock irq_context: 0 &type->s_umount_key#27/1 sb_lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 rcu_tasks.tasks_gp_mutex (console_sem).lock irq_context: 0 rcu_tasks.tasks_gp_mutex console_lock console_srcu console_owner_lock irq_context: 0 rcu_tasks.tasks_gp_mutex console_lock console_srcu console_owner irq_context: 0 rcu_tasks.tasks_gp_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 rcu_tasks.tasks_gp_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &meta->lock irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#28/1 irq_context: 0 *(&acpi_gbl_hardware_lock) irq_context: 0 *(&acpi_gbl_gpe_lock) irq_context: 0 acpi_ioapic_lock ioapic_mutex irq_context: 0 &desc->request_mutex &irq_desc_lock_class vector_lock irq_context: 0 &desc->request_mutex &irq_desc_lock_class ioapic_lock irq_context: 0 &desc->request_mutex &irq_desc_lock_class mask_lock irq_context: 0 &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock irq_context: 0 &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock vector_lock irq_context: 0 &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock ioapic_lock irq_context: 0 &desc->request_mutex &irq_desc_lock_class ioapic_lock i8259A_lock irq_context: 0 shrink_qlist.lock irq_context: 0 remove_cache_srcu_srcu_usage.lock 
irq_context: 0 remove_cache_srcu_srcu_usage.lock &obj_hash[i].lock irq_context: 0 &ACCESS_PRIVATE(sdp, lock) irq_context: 0 remove_cache_srcu irq_context: 0 remove_cache_srcu_srcu_usage.lock &ACCESS_PRIVATE(sdp, lock) irq_context: 0 remove_cache_srcu_srcu_usage.lock rcu_read_lock &pool->lock irq_context: 0 remove_cache_srcu_srcu_usage.lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 remove_cache_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 remove_cache_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 remove_cache_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex remove_cache_srcu_srcu_usage.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) remove_cache_srcu_srcu_usage.lock irq_context: 0 &x->wait#3 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &ssp->srcu_sup->srcu_cb_mutex remove_cache_srcu_srcu_usage.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cpu_hotplug_lock flush_lock irq_context: 0 cpu_hotplug_lock flush_lock &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock flush_lock rcu_read_lock &pool->lock irq_context: 0 cpu_hotplug_lock flush_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock flush_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cpu_hotplug_lock flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock flush_lock (work_completion)(&sfw->work) irq_context: 0 cpu_hotplug_lock flush_lock rcu_read_lock (wq_completion)slub_flushwq irq_context: 0 cpu_hotplug_lock flush_lock &x->wait#10 irq_context: 0 cpu_hotplug_lock flush_lock &rq->__lock irq_context: 0 cpu_hotplug_lock flush_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)slub_flushwq irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&sfw->work) irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&sfw->work) &c->lock irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&sfw->work) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&sfw->work) &obj_hash[i].lock irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&sfw->work) &n->list_lock irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&barr->work) irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&barr->work) &x->wait#10 irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&barr->work) &x->wait#10 &p->pi_lock irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock 
&pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &n->list_lock &c->lock irq_context: 0 system_transition_mutex irq_context: 0 (power_off_prep_handler_list).rwsem irq_context: 0 power_off_handler_list.lock irq_context: 0 (restart_prep_handler_list).rwsem irq_context: 0 (reboot_notifier_list).rwsem irq_context: 0 *(&acpi_gbl_gpe_lock) (console_sem).lock irq_context: 0 *(&acpi_gbl_gpe_lock) console_lock console_srcu console_owner_lock irq_context: 0 *(&acpi_gbl_gpe_lock) console_lock console_srcu console_owner irq_context: 0 *(&acpi_gbl_gpe_lock) console_lock console_srcu console_owner &port_lock_key irq_context: 0 *(&acpi_gbl_gpe_lock) console_lock console_srcu console_owner console_owner_lock irq_context: 0 acpi_scan_lock irq_context: 0 acpi_scan_lock semaphore->lock irq_context: 0 acpi_scan_lock fs_reclaim irq_context: 0 acpi_scan_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock pool_lock#2 irq_context: 0 acpi_scan_lock &obj_hash[i].lock irq_context: 0 acpi_scan_lock &x->wait#9 irq_context: 0 acpi_scan_lock &c->lock irq_context: 0 acpi_scan_lock &pcp->lock &zone->lock irq_context: 0 acpi_scan_lock &zone->lock irq_context: 0 acpi_scan_lock &____s->seqcount irq_context: 0 acpi_scan_lock &n->list_lock irq_context: 0 acpi_scan_lock &n->list_lock &c->lock irq_context: 0 acpi_scan_lock acpi_device_lock irq_context: 0 acpi_scan_lock acpi_device_lock fs_reclaim irq_context: 0 acpi_scan_lock acpi_device_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock acpi_device_lock pool_lock#2 irq_context: 0 acpi_scan_lock acpi_device_lock &xa->xa_lock#2 irq_context: 0 acpi_scan_lock acpi_device_lock semaphore->lock irq_context: 0 acpi_scan_lock acpi_device_lock &obj_hash[i].lock irq_context: 0 acpi_scan_lock &k->list_lock irq_context: 0 acpi_scan_lock lock irq_context: 0 acpi_scan_lock lock kernfs_idr_lock irq_context: 0 acpi_scan_lock &root->kernfs_rwsem irq_context: 0 acpi_scan_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 acpi_scan_lock bus_type_sem irq_context: 0 acpi_scan_lock sysfs_symlink_target_lock irq_context: 0 acpi_scan_lock &k->k_lock irq_context: 0 acpi_scan_lock &root->kernfs_rwsem irq_context: 0 acpi_scan_lock &dev->power.lock irq_context: 0 acpi_scan_lock dpm_list_mtx irq_context: 0 acpi_scan_lock &dev->mutex &k->list_lock irq_context: 0 acpi_scan_lock &dev->mutex &k->k_lock irq_context: 0 acpi_scan_lock &dev->mutex &dev->power.lock irq_context: 0 acpi_scan_lock subsys mutex#12 irq_context: 0 acpi_scan_lock uevent_sock_mutex irq_context: 0 acpi_scan_lock running_helpers_waitq.lock irq_context: 0 acpi_scan_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 acpi_scan_lock rcu_read_lock pool_lock#2 irq_context: 0 acpi_scan_lock *(&acpi_gbl_reference_count_lock) irq_context: 0 acpi_scan_lock &obj_hash[i].lock pool_lock irq_context: 0 acpi_scan_lock acpi_device_lock &c->lock irq_context: 0 acpi_scan_lock acpi_device_lock &____s->seqcount irq_context: 0 acpi_scan_lock batched_entropy_u8.lock irq_context: 0 acpi_scan_lock kfence_freelist_lock irq_context: 0 acpi_scan_lock pci_config_lock irq_context: 0 acpi_scan_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 acpi_scan_lock &meta->lock irq_context: 0 acpi_scan_lock quarantine_lock irq_context: 0 acpi_scan_lock (console_sem).lock irq_context: 0 acpi_scan_lock console_lock console_srcu console_owner_lock irq_context: 0 acpi_scan_lock console_lock console_srcu console_owner irq_context: 0 acpi_scan_lock console_lock 
console_srcu console_owner &port_lock_key irq_context: 0 acpi_scan_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 acpi_scan_lock pci_bus_sem irq_context: 0 acpi_scan_lock pci_mmcfg_lock irq_context: 0 acpi_scan_lock resource_lock irq_context: 0 acpi_scan_lock &device->physical_node_lock irq_context: 0 acpi_scan_lock &device->physical_node_lock sysfs_symlink_target_lock irq_context: 0 acpi_scan_lock &device->physical_node_lock fs_reclaim irq_context: 0 acpi_scan_lock &device->physical_node_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock &device->physical_node_lock pool_lock#2 irq_context: 0 acpi_scan_lock &device->physical_node_lock lock irq_context: 0 acpi_scan_lock &device->physical_node_lock lock kernfs_idr_lock irq_context: 0 acpi_scan_lock &device->physical_node_lock &root->kernfs_rwsem irq_context: 0 acpi_scan_lock &device->physical_node_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 acpi_scan_lock fwnode_link_lock irq_context: 0 acpi_scan_lock fwnode_link_lock &k->k_lock irq_context: 0 acpi_scan_lock devtree_lock irq_context: 0 acpi_scan_lock gdp_mutex irq_context: 0 acpi_scan_lock gdp_mutex &k->list_lock irq_context: 0 acpi_scan_lock gdp_mutex fs_reclaim irq_context: 0 acpi_scan_lock gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock gdp_mutex pool_lock#2 irq_context: 0 acpi_scan_lock gdp_mutex lock irq_context: 0 acpi_scan_lock gdp_mutex lock kernfs_idr_lock irq_context: 0 acpi_scan_lock gdp_mutex &root->kernfs_rwsem irq_context: 0 acpi_scan_lock gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 acpi_scan_lock subsys mutex#13 irq_context: 0 acpi_scan_lock subsys mutex#13 &k->k_lock irq_context: 0 acpi_scan_lock pci_bus_sem irq_context: 0 acpi_scan_lock pci_acpi_companion_lookup_sem irq_context: 0 acpi_scan_lock pci_slot_mutex irq_context: 0 acpi_scan_lock tk_core.seq.seqcount irq_context: 0 acpi_scan_lock resource_alignment_lock irq_context: 0 acpi_scan_lock device_links_srcu irq_context: 0 acpi_scan_lock &dev->power.lock &dev->power.lock/1 irq_context: 0 acpi_scan_lock subsys mutex#14 irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock semaphore->lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock fs_reclaim irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock pool_lock#2 irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock *(&acpi_gbl_reference_count_lock) irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock fs_reclaim irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock pool_lock#2 irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock wakeup_ida.xa_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &x->wait#9 irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &obj_hash[i].lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &k->list_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex irq_context: 0 
acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex &k->list_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex fs_reclaim irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex pool_lock#2 irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex lock kernfs_idr_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex &root->kernfs_rwsem irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock lock kernfs_idr_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &root->kernfs_rwsem irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock bus_type_sem irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock sysfs_symlink_target_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &c->lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &____s->seqcount irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock uevent_sock_mutex irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock running_helpers_waitq.lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &k->k_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock subsys mutex#15 irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock subsys mutex#15 &k->k_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock events_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &pcp->lock &zone->lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 acpi_scan_lock &rq->__lock irq_context: 0 &pgdat->kswapd_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &obj_hash[i].lock pool_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock fill_pool_map-wait-type-override pool_lock irq_context: softirq drivers/char/random.c:251 irq_context: softirq drivers/char/random.c:251 rcu_read_lock &pool->lock/1 irq_context: softirq drivers/char/random.c:251 rcu_read_lock &pool->lock/1 
&obj_hash[i].lock irq_context: softirq drivers/char/random.c:251 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq drivers/char/random.c:251 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq drivers/char/random.c:251 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (next_reseed).work irq_context: 0 (wq_completion)events_unbound (next_reseed).work &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (next_reseed).work &base->lock irq_context: 0 (wq_completion)events_unbound (next_reseed).work &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (next_reseed).work input_pool.lock irq_context: 0 (wq_completion)events_unbound (next_reseed).work base_crng.lock irq_context: 0 acpi_scan_lock pci_rescan_remove_lock irq_context: 0 acpi_scan_lock pci_rescan_remove_lock &dev->mutex &dev->power.lock irq_context: 0 acpi_scan_lock pci_rescan_remove_lock &dev->mutex &k->list_lock irq_context: 0 acpi_scan_lock pci_rescan_remove_lock &dev->mutex &k->k_lock irq_context: 0 acpi_scan_lock subsys mutex#3 irq_context: 0 acpi_scan_lock acpi_link_lock irq_context: 0 acpi_scan_lock acpi_link_lock fs_reclaim irq_context: 0 acpi_scan_lock acpi_link_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock acpi_link_lock pool_lock#2 irq_context: 0 acpi_scan_lock acpi_link_lock semaphore->lock irq_context: 0 acpi_scan_lock acpi_link_lock &obj_hash[i].lock irq_context: 0 acpi_scan_lock acpi_link_lock *(&acpi_gbl_reference_count_lock) irq_context: 0 acpi_scan_lock acpi_link_lock pci_config_lock irq_context: 0 acpi_scan_lock acpi_link_lock &pcp->lock &zone->lock irq_context: 0 acpi_scan_lock acpi_link_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 acpi_scan_lock acpi_link_lock &____s->seqcount irq_context: 0 acpi_scan_lock acpi_link_lock &zone->lock irq_context: 0 acpi_scan_lock acpi_link_lock rcu_read_lock pool_lock#2 irq_context: 0 acpi_scan_lock acpi_link_lock (console_sem).lock irq_context: 0 acpi_scan_lock acpi_link_lock console_lock console_srcu console_owner_lock irq_context: 0 acpi_scan_lock acpi_link_lock console_lock console_srcu console_owner irq_context: 0 acpi_scan_lock acpi_link_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 acpi_scan_lock acpi_link_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 acpi_scan_lock acpi_link_lock &c->lock irq_context: 0 acpi_scan_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 acpi_scan_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 acpi_scan_lock wakeup_ida.xa_lock irq_context: 0 acpi_scan_lock subsys mutex#15 irq_context: 0 acpi_scan_lock subsys mutex#15 &k->k_lock irq_context: 0 acpi_scan_lock events_lock irq_context: 0 acpi_scan_lock power_resource_list_lock irq_context: 0 acpi_device_lock irq_context: 0 &(&priv->bus_notifier)->rwsem irq_context: 0 k-sk_lock-AF_NETLINK irq_context: 0 k-sk_lock-AF_NETLINK k-slock-AF_NETLINK irq_context: 0 k-sk_lock-AF_NETLINK rcu_read_lock rhashtable_bucket irq_context: 0 k-slock-AF_NETLINK irq_context: 0 &type->s_umount_key#10/1 irq_context: 0 &type->s_umount_key#10/1 fs_reclaim irq_context: 0 &type->s_umount_key#10/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#10/1 pool_lock#2 irq_context: 0 &type->s_umount_key#10/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#10/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 
&type->s_umount_key#10/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#10/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#10/1 sb_lock irq_context: 0 &type->s_umount_key#10/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#10/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#10/1 &zone->lock irq_context: 0 &type->s_umount_key#10/1 &____s->seqcount irq_context: 0 &type->s_umount_key#10/1 &c->lock irq_context: 0 &type->s_umount_key#10/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#10/1 &sb->s_type->i_lock_key#9 irq_context: 0 &type->s_umount_key#10/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#10/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#10/1 &sb->s_type->i_lock_key#9 &dentry->d_lock irq_context: 0 &type->s_umount_key#10/1 &dentry->d_lock irq_context: 0 &type->s_umount_key#11/1 irq_context: 0 &type->s_umount_key#11/1 fs_reclaim irq_context: 0 &type->s_umount_key#11/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#11/1 pool_lock#2 irq_context: 0 &type->s_umount_key#11/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#11/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#11/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#11/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#11/1 sb_lock irq_context: 0 &type->s_umount_key#11/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#11/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#11/1 &____s->seqcount irq_context: 0 &type->s_umount_key#11/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#11/1 &zone->lock irq_context: 0 &type->s_umount_key#11/1 rcu_read_lock pool_lock#2 irq_context: 0 &type->s_umount_key#11/1 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#11/1 &sb->s_type->i_lock_key#10 irq_context: 0 &type->s_umount_key#11/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#11/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#11/1 &sb->s_type->i_lock_key#10 &dentry->d_lock irq_context: 0 &type->s_umount_key#11/1 &dentry->d_lock irq_context: 0 &mm->mmap_lock irq_context: 0 &mm->mmap_lock reservation_ww_class_acquire irq_context: 0 &mm->mmap_lock reservation_ww_class_acquire reservation_ww_class_mutex irq_context: 0 &mm->mmap_lock reservation_ww_class_acquire reservation_ww_class_mutex fs_reclaim irq_context: 0 &mm->mmap_lock reservation_ww_class_acquire reservation_ww_class_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock reservation_ww_class_acquire reservation_ww_class_mutex fs_reclaim &mapping->i_mmap_rwsem irq_context: 0 &mm->mmap_lock reservation_ww_class_acquire reservation_ww_class_mutex fs_reclaim mmu_notifier_invalidate_range_start dma_fence_map irq_context: 0 delayed_uprobe_lock irq_context: 0 key irq_context: 0 attribute_container_mutex irq_context: 0 triggers_list_lock irq_context: 0 leds_list_lock irq_context: 0 bus_type_sem irq_context: 0 (usb_notifier_list).rwsem irq_context: softirq mm/vmstat.c:2014 irq_context: softirq mm/vmstat.c:2014 rcu_read_lock &pool->lock irq_context: softirq mm/vmstat.c:2014 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq mm/vmstat.c:2014 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq mm/vmstat.c:2014 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq mm/vmstat.c:2014 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (shepherd).work 
irq_context: 0 (wq_completion)events (shepherd).work cpu_hotplug_lock irq_context: 0 (wq_completion)events (shepherd).work cpu_hotplug_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (shepherd).work cpu_hotplug_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (shepherd).work cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (shepherd).work cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (shepherd).work cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (shepherd).work &obj_hash[i].lock irq_context: 0 (wq_completion)events (shepherd).work &base->lock irq_context: 0 (wq_completion)events (shepherd).work &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mm_percpu_wq irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) &obj_hash[i].lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) &base->lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) &base->lock &obj_hash[i].lock irq_context: 0 batched_entropy_u8.lock crngs.lock irq_context: 0 &device->physical_node_lock irq_context: 0 rc_map_lock irq_context: 0 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 fill_pool_map-wait-type-override pool_lock irq_context: 0 subsys mutex#16 irq_context: 0 resource_lock irq_context: 0 free_vmap_area_lock &obj_hash[i].lock irq_context: 0 free_vmap_area_lock pool_lock#2 irq_context: 0 &entry->access irq_context: 0 info_mutex irq_context: 0 info_mutex proc_subdir_lock irq_context: 0 info_mutex fs_reclaim irq_context: 0 info_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 info_mutex pool_lock#2 irq_context: 0 info_mutex proc_inum_ida.xa_lock irq_context: 0 info_mutex proc_subdir_lock irq_context: 0 kobj_ns_type_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex 
pcpu_alloc_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &x->wait#9 irq_context: 0 pernet_ops_rwsem rtnl_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &k->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex lock irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex kobj_ns_type_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex bus_type_sem irq_context: 0 pernet_ops_rwsem rtnl_mutex sysfs_symlink_target_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex &dev->power.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dpm_list_mtx irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex running_helpers_waitq.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex subsys mutex#17 irq_context: 0 pernet_ops_rwsem rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &dir->lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_hotplug_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex 
dev_base_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex input_pool.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex nl_table_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex nl_table_wait.lock irq_context: 0 rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock nl_table_lock irq_context: 0 rcu_read_lock nl_table_wait.lock irq_context: 0 qdisc_mod_lock irq_context: 0 rcu_read_lock &c->lock irq_context: 0 rcu_read_lock &pcp->lock &zone->lock irq_context: 0 rcu_read_lock &zone->lock irq_context: 0 rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rcu_read_lock &____s->seqcount irq_context: 0 rtnl_mutex &zone->lock irq_context: 0 rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#3 &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 bt_proto_lock irq_context: 0 hci_cb_list_lock irq_context: 0 mgmt_chan_list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex net_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 rate_ctrl_mutex irq_context: 0 rate_ctrl_mutex fs_reclaim irq_context: 0 rate_ctrl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rate_ctrl_mutex pool_lock#2 irq_context: 0 netlbl_domhsh_lock irq_context: 0 netlbl_unlhsh_lock irq_context: 0 rcu_read_lock netlbl_domhsh_lock irq_context: 0 rcu_read_lock netlbl_domhsh_lock pool_lock#2 irq_context: 0 misc_mtx irq_context: 0 misc_mtx fs_reclaim irq_context: 0 misc_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 misc_mtx pool_lock#2 irq_context: 0 misc_mtx &x->wait#9 irq_context: 0 misc_mtx &obj_hash[i].lock irq_context: 0 misc_mtx &k->list_lock irq_context: 0 misc_mtx gdp_mutex irq_context: 0 misc_mtx gdp_mutex &k->list_lock irq_context: 0 misc_mtx gdp_mutex fs_reclaim irq_context: 0 misc_mtx gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 misc_mtx gdp_mutex pool_lock#2 irq_context: 0 misc_mtx gdp_mutex lock irq_context: 0 misc_mtx gdp_mutex lock kernfs_idr_lock irq_context: 0 misc_mtx gdp_mutex &root->kernfs_rwsem irq_context: 0 misc_mtx gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 misc_mtx lock irq_context: 0 misc_mtx lock kernfs_idr_lock irq_context: 0 misc_mtx &root->kernfs_rwsem irq_context: 0 misc_mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 misc_mtx bus_type_sem irq_context: 0 misc_mtx sysfs_symlink_target_lock irq_context: 0 misc_mtx &root->kernfs_rwsem irq_context: 0 misc_mtx &c->lock irq_context: 0 misc_mtx &pcp->lock &zone->lock irq_context: 0 misc_mtx &zone->lock irq_context: 0 misc_mtx &____s->seqcount irq_context: 0 misc_mtx &dev->power.lock irq_context: 0 misc_mtx dpm_list_mtx irq_context: 0 misc_mtx req_lock irq_context: 0 misc_mtx &p->pi_lock irq_context: 0 misc_mtx &p->pi_lock &rq->__lock irq_context: 0 misc_mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 misc_mtx &x->wait#11 irq_context: 0 misc_mtx &rq->__lock irq_context: 0 misc_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers irq_context: 0 sb_writers mount_lock irq_context: 0 misc_mtx rcu_read_lock &rq->__lock irq_context: 0 misc_mtx rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
sb_writers &type->i_mutex_dir_key/1 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 rename_lock.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 fs_reclaim irq_context: 0 sb_writers &type->i_mutex_dir_key/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &type->i_mutex_dir_key/1 pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &dentry->d_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &obj_hash[i].lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sbinfo->stat_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &pcp->lock &zone->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &zone->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &____s->seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &c->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_lock_key#5 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &s->s_inode_list_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tk_core.seq.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 batched_entropy_u32.lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 batched_entropy_u32.lock crngs.lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 batched_entropy_u32.lock crngs.lock base_crng.lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_lock_key#5 &dentry->d_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 tk_core.seq.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 rcu_read_lock &dentry->d_lock irq_context: 0 &x->wait#11 irq_context: 0 &x->wait#11 &p->pi_lock irq_context: 0 &x->wait#11 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &x->wait#11 &p->pi_lock &rq->__lock irq_context: 0 &x->wait#11 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 misc_mtx uevent_sock_mutex irq_context: 0 misc_mtx running_helpers_waitq.lock irq_context: 0 misc_mtx subsys mutex#18 irq_context: 0 misc_mtx subsys mutex#18 &k->k_lock irq_context: 0 rcu_read_lock &pool->lock irq_context: 0 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 input_mutex irq_context: 0 input_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill_global_led_trigger_work) irq_context: 0 (wq_completion)events (work_completion)(&rfkill_global_led_trigger_work) rfkill_global_mutex irq_context: 0 input_mutex input_devices_poll_wait.lock irq_context: 0 (netlink_chain).rwsem irq_context: 0 proto_tab_lock irq_context: 0 resource_lock pool_lock#2 irq_context: 0 resource_lock &obj_hash[i].lock irq_context: 0 random_ready_notifier.lock irq_context: 0 random_ready_notifier.lock crngs.lock irq_context: 0 misc_mtx misc_minors_ida.xa_lock irq_context: 0 misc_mtx lock kernfs_idr_lock pool_lock#2 irq_context: 0 vga_lock#2 irq_context: 0 vga_lock#2 pci_config_lock irq_context: 0 vga_lock#2 (console_sem).lock irq_context: 0 vga_lock#2 console_lock console_srcu console_owner_lock irq_context: 0 vga_lock#2 console_lock console_srcu console_owner irq_context: 0 vga_lock#2 console_lock 
console_srcu console_owner &port_lock_key irq_context: 0 vga_lock#2 console_lock console_srcu console_owner console_owner_lock irq_context: 0 &type->s_umount_key#12/1 irq_context: 0 &type->s_umount_key#12/1 fs_reclaim irq_context: 0 &type->s_umount_key#12/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#12/1 pool_lock#2 irq_context: 0 &type->s_umount_key#12/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#12/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#12/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#12/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#12/1 sb_lock irq_context: 0 &type->s_umount_key#12/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#12/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#12/1 &sb->s_type->i_lock_key#11 irq_context: 0 &type->s_umount_key#12/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#12/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#12/1 &sb->s_type->i_lock_key#11 &dentry->d_lock irq_context: 0 &type->s_umount_key#12/1 &dentry->d_lock irq_context: 0 &sb->s_type->i_lock_key#11 irq_context: 0 clocksource_mutex cpu_hotplug_lock irq_context: 0 clocksource_mutex cpu_hotplug_lock stop_cpus_mutex irq_context: 0 clocksource_mutex cpu_hotplug_lock stop_cpus_mutex &stopper->lock irq_context: 0 clocksource_mutex cpu_hotplug_lock stop_cpus_mutex &stop_pi_lock irq_context: 0 clocksource_mutex cpu_hotplug_lock stop_cpus_mutex &stop_pi_lock &rq->__lock irq_context: 0 clocksource_mutex cpu_hotplug_lock stop_cpus_mutex &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 clocksource_mutex cpu_hotplug_lock stop_cpus_mutex &rq->__lock irq_context: 0 clocksource_mutex cpu_hotplug_lock stop_cpus_mutex &x->wait#8 irq_context: 0 clocksource_mutex (console_sem).lock irq_context: 0 clocksource_mutex console_lock console_srcu console_owner_lock irq_context: 0 clocksource_mutex console_lock console_srcu console_owner irq_context: 0 clocksource_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 clocksource_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#28/1 fs_reclaim irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#28/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#28/1 pcpu_alloc_mutex irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#28/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#13/1 irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#28/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#13/1 fs_reclaim irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#28/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#13/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#28/1 sb_lock irq_context: 0 &type->s_umount_key#13/1 pool_lock#2 irq_context: 0 &type->s_umount_key#28/1 irq_context: 0 &type->s_umount_key#13/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#13/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#28/1 fs_reclaim irq_context: 0 &type->s_umount_key#13/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#28/1 fs_reclaim mmu_notifier_invalidate_range_start 
irq_context: 0 &type->s_umount_key#13/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#28/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#13/1 sb_lock irq_context: 0 &type->s_umount_key#28/1 &____s->seqcount irq_context: 0 &type->s_umount_key#13/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#28/1 &xa->xa_lock#7 irq_context: 0 &type->s_umount_key#28/1 lock#4 irq_context: 0 &type->s_umount_key#13/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#28/1 &mapping->private_lock irq_context: 0 &type->s_umount_key#28/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#28/1 &dd->lock irq_context: 0 &type->s_umount_key#13/1 &sb->s_type->i_lock_key#12 irq_context: 0 &type->s_umount_key#28/1 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#13/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#13/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#28/1 rcu_read_lock &pool->lock irq_context: 0 &type->s_umount_key#13/1 &sb->s_type->i_lock_key#12 &dentry->d_lock irq_context: 0 &type->s_umount_key#13/1 &dentry->d_lock irq_context: 0 &type->s_umount_key#28/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#28/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->s_umount_key#28/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#28/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#28/1 bit_wait_table + i irq_context: 0 &type->s_umount_key#28/1 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#5 irq_context: 0 &sb->s_type->i_mutex_key#5 &sb->s_type->i_lock_key#12 irq_context: 0 &type->s_umount_key#28/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#5 rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#5 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_lock_key#3 irq_context: 0 &sb->s_type->i_mutex_key#5 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#5 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#5 rcu_read_lock rename_lock.seqcount irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_lock_key#3 &xa->xa_lock#7 irq_context: 0 &sb->s_type->i_mutex_key#5 &dentry->d_lock &wq irq_context: 0 &type->s_umount_key#28/1 lock#4 &lruvec->lru_lock irq_context: 0 &sb->s_type->i_mutex_key#5 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#5 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#28/1 lock#5 irq_context: 0 &sb->s_type->i_mutex_key#5 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#28/1 &lruvec->lru_lock irq_context: 0 &sb->s_type->i_mutex_key#5 &sb->s_type->i_lock_key#12 &dentry->d_lock irq_context: 0 &type->s_umount_key#28/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#28/1 &zone->lock irq_context: 0 &type->s_umount_key#28/1 crypto_alg_sem irq_context: 0 &type->s_umount_key#28/1 pool_lock#2 irq_context: 0 &type->s_umount_key#28/1 &xa->xa_lock#7 pool_lock#2 irq_context: 0 &type->s_umount_key#28/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &type->s_umount_key#28/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#28/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#28/1 percpu_counters_lock irq_context: 0 &type->s_umount_key#28/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#28/1 inode_hash_lock irq_context: 0 
&type->s_umount_key#28/1 &c->lock irq_context: 0 &type->s_umount_key#28/1 inode_hash_lock &sb->s_type->i_lock_key#22 irq_context: 0 &type->s_umount_key#28/1 inode_hash_lock &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#28/1 rcu_read_lock &dd->lock irq_context: 0 &type->s_umount_key#28/1 rcu_read_lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#28/1 rcu_read_lock pool_lock#2 irq_context: 0 &type->s_umount_key#28/1 rcu_read_lock tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#15/1 irq_context: 0 &type->s_umount_key#15/1 fs_reclaim irq_context: 0 &type->s_umount_key#28/1 rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &type->s_umount_key#15/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_lock_key#22 irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#8 irq_context: 0 &type->s_umount_key#15/1 pool_lock#2 irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#8 &ei->i_es_lock irq_context: 0 &type->s_umount_key#15/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#8 &ei->i_data_sem irq_context: 0 &type->s_umount_key#15/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#8 &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#8 &ei->i_data_sem pool_lock#2 irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock irq_context: 0 &type->s_umount_key#15/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#15/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#15/1 sb_lock irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &zone->lock irq_context: 0 &type->s_umount_key#15/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &type->s_umount_key#15/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &type->s_umount_key#28/1 &xa->xa_lock#7 &c->lock irq_context: 0 &type->s_umount_key#28/1 &xa->xa_lock#7 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#15/1 &sb->s_type->i_lock_key#13 irq_context: 0 &type->s_umount_key#28/1 &xa->xa_lock#7 &zone->lock irq_context: 0 &type->s_umount_key#15/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#28/1 &xa->xa_lock#7 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#15/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#28/1 &xa->xa_lock#7 &____s->seqcount irq_context: 0 &type->s_umount_key#15/1 &sb->s_type->i_lock_key#13 &dentry->d_lock irq_context: 0 &type->s_umount_key#28/1 proc_subdir_lock irq_context: 0 &type->s_umount_key#15/1 &dentry->d_lock irq_context: 0 &type->s_umount_key#28/1 proc_inum_ida.xa_lock irq_context: 0 &type->s_umount_key#28/1 proc_subdir_lock irq_context: 0 &type->s_umount_key#28/1 &journal->j_state_lock irq_context: 
0 &type->s_umount_key#28/1 kthread_create_lock irq_context: 0 &type->s_umount_key#28/1 &p->pi_lock irq_context: 0 &type->s_umount_key#28/1 &x->wait irq_context: 0 &type->s_umount_key#28/1 &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#28/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#16/1 irq_context: 0 &type->s_umount_key#16/1 fs_reclaim irq_context: 0 &type->s_umount_key#16/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#16/1 pool_lock#2 irq_context: 0 &type->s_umount_key#16/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#16/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#16/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#16/1 &c->lock irq_context: 0 &type->s_umount_key#16/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#16/1 &zone->lock irq_context: 0 &type->s_umount_key#16/1 &____s->seqcount irq_context: 0 &type->s_umount_key#16/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#16/1 sb_lock irq_context: 0 &type->s_umount_key#16/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#28/1 &journal->j_wait_done_commit irq_context: 0 &type->s_umount_key#16/1 mmu_notifier_invalidate_range_start irq_context: 0 &journal->j_wait_done_commit irq_context: 0 &journal->j_wait_done_commit &p->pi_lock irq_context: 0 &journal->j_wait_done_commit &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#16/1 &sb->s_type->i_lock_key#14 irq_context: 0 &type->s_umount_key#16/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#16/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#16/1 &sb->s_type->i_lock_key#14 &dentry->d_lock irq_context: 0 &type->s_umount_key#16/1 &dentry->d_lock irq_context: hardirq tick_broadcast_lock irq_context: hardirq tick_broadcast_lock jiffies_lock irq_context: hardirq hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 &s->s_inode_list_lock irq_context: 0 (wq_completion)events timer_update_work irq_context: 0 (wq_completion)events timer_update_work timer_keys_mutex irq_context: 0 (wq_completion)events timer_update_work timer_keys_mutex cpu_hotplug_lock irq_context: 0 (wq_completion)events timer_update_work timer_keys_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 (wq_completion)events timer_update_work timer_keys_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 (wq_completion)events timer_update_work timer_keys_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 &type->s_umount_key#17/1 irq_context: 0 &type->s_umount_key#17/1 fs_reclaim irq_context: 0 &type->s_umount_key#17/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#17/1 pool_lock#2 irq_context: 0 &type->s_umount_key#17/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#17/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#17/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#17/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#17/1 sb_lock irq_context: 0 &type->s_umount_key#17/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#17/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#17/1 &sb->s_type->i_lock_key#15 irq_context: 0 &type->s_umount_key#17/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#17/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#17/1 &sb->s_type->i_lock_key#15 &dentry->d_lock irq_context: 0 &type->s_umount_key#17/1 &dentry->d_lock irq_context: 0 
&sb->s_type->i_lock_key#15 irq_context: 0 bio_slab_lock bio_slabs.xa_lock &c->lock irq_context: 0 bio_slab_lock bio_slabs.xa_lock &pcp->lock &zone->lock irq_context: 0 bio_slab_lock bio_slabs.xa_lock &zone->lock irq_context: 0 bio_slab_lock bio_slabs.xa_lock &____s->seqcount irq_context: 0 kclist_lock irq_context: 0 kclist_lock resource_lock irq_context: 0 kclist_lock fs_reclaim irq_context: 0 kclist_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kclist_lock pool_lock#2 irq_context: 0 &type->s_umount_key#18/1 irq_context: 0 &type->s_umount_key#18/1 fs_reclaim irq_context: 0 &type->s_umount_key#18/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#18/1 pool_lock#2 irq_context: 0 &type->s_umount_key#18/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#18/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#18/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#18/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#18/1 sb_lock irq_context: 0 &type->s_umount_key#18/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#18/1 &____s->seqcount irq_context: 0 &type->s_umount_key#18/1 &c->lock irq_context: 0 &type->s_umount_key#18/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#18/1 &sb->s_type->i_lock_key#16 irq_context: 0 &type->s_umount_key#18/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#18/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#18/1 &sb->s_type->i_lock_key#16 &dentry->d_lock irq_context: 0 &type->s_umount_key#18/1 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#2 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#2 &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#2 &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#2 &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#2 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#2 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#2 &obj_hash[i].lock irq_context: 0 tomoyo_ss irq_context: 0 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 tomoyo_ss pool_lock#2 irq_context: 0 tomoyo_ss tomoyo_policy_lock irq_context: 0 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 tomoyo_ss tomoyo_policy_lock &c->lock irq_context: 0 tomoyo_ss tomoyo_policy_lock &____s->seqcount irq_context: 0 tomoyo_ss (console_sem).lock irq_context: 0 tomoyo_ss console_lock console_srcu console_owner_lock irq_context: 0 tomoyo_ss console_lock console_srcu console_owner irq_context: 0 tomoyo_ss console_lock console_srcu console_owner &port_lock_key irq_context: 0 tomoyo_ss console_lock console_srcu console_owner console_owner_lock irq_context: 0 &type->s_umount_key#19/1 irq_context: 0 &type->s_umount_key#19/1 fs_reclaim irq_context: 0 &type->s_umount_key#19/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#19/1 pool_lock#2 irq_context: 0 &type->s_umount_key#19/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#19/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#19/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#19/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#19/1 sb_lock irq_context: 0 &type->s_umount_key#19/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#19/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#19/1 &sb->s_type->i_lock_key#17 irq_context: 0 
&type->s_umount_key#19/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#19/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#19/1 &sb->s_type->i_lock_key#17 &dentry->d_lock irq_context: 0 &type->s_umount_key#19/1 &dentry->d_lock irq_context: 0 &ns->lock irq_context: 0 &ns->lock &dentry->d_lock irq_context: 0 &ns->lock pin_fs_lock irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 &sb->s_type->i_lock_key#17 irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 rename_lock.seqcount irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 fs_reclaim irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 pool_lock#2 irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 &dentry->d_lock irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 rcu_read_lock rename_lock.seqcount irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 &dentry->d_lock &wq irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 mmu_notifier_invalidate_range_start irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 &s->s_inode_list_lock irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 tk_core.seq.seqcount irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 &sb->s_type->i_lock_key#17 &dentry->d_lock irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 &c->lock irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 &pcp->lock &zone->lock irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 &zone->lock irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 &____s->seqcount irq_context: 0 &type->s_umount_key#20 irq_context: 0 &type->s_umount_key#20 sb_lock irq_context: 0 &type->s_umount_key#20 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#2 rcu_read_lock &dentry->d_lock irq_context: 0 pnp_lock irq_context: 0 pnp_lock fs_reclaim irq_context: 0 pnp_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pnp_lock pool_lock#2 irq_context: 0 &device->physical_node_lock sysfs_symlink_target_lock irq_context: 0 &device->physical_node_lock fs_reclaim irq_context: 0 &device->physical_node_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &device->physical_node_lock pool_lock#2 irq_context: 0 &device->physical_node_lock &c->lock irq_context: 0 &device->physical_node_lock &pcp->lock &zone->lock irq_context: 0 &device->physical_node_lock &zone->lock irq_context: 0 &device->physical_node_lock &____s->seqcount irq_context: 0 &device->physical_node_lock lock irq_context: 0 &device->physical_node_lock lock kernfs_idr_lock irq_context: 0 &device->physical_node_lock &root->kernfs_rwsem irq_context: 0 &device->physical_node_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 fwnode_link_lock irq_context: 0 fwnode_link_lock &k->k_lock irq_context: 0 subsys mutex#19 irq_context: softirq rcu_callback pcpu_lock irq_context: 0 subsys mutex#20 irq_context: 0 subsys mutex#20 &k->k_lock irq_context: 0 subsys mutex#21 irq_context: 0 subsys mutex#21 &k->k_lock irq_context: 0 subsys mutex#22 irq_context: 0 subsys mutex#22 &k->k_lock irq_context: 0 fill_pool_map-wait-type-override &c->lock irq_context: 0 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 tty_mutex irq_context: 0 fs_reclaim &rq->__lock irq_context: 0 fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 fill_pool_map-wait-type-override &zone->lock 
irq_context: hardirq hrtimer_bases.lock fill_pool_map-wait-type-override pool_lock#2 irq_context: hardirq hrtimer_bases.lock fill_pool_map-wait-type-override &c->lock irq_context: hardirq hrtimer_bases.lock fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: hardirq hrtimer_bases.lock fill_pool_map-wait-type-override &zone->lock irq_context: hardirq hrtimer_bases.lock fill_pool_map-wait-type-override &____s->seqcount irq_context: hardirq hrtimer_bases.lock fill_pool_map-wait-type-override pool_lock irq_context: softirq led_lock irq_context: 0 subsys mutex#23 irq_context: 0 subsys mutex#23 &k->list_lock irq_context: 0 subsys mutex#23 &k->k_lock irq_context: 0 jiffies_seq.seqcount irq_context: 0 cpu_hotplug_lock wq_pool_mutex &n->list_lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &xa->xa_lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex kthread_create_lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &p->pi_lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock wq_pool_mutex &rq->__lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &x->wait irq_context: 0 cpu_hotplug_lock wq_pool_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock wq_pool_mutex wq_pool_attach_mutex irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock/1 &p->pi_lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 netevent_notif_chain.lock irq_context: 0 clients_rwsem irq_context: 0 clients_rwsem fs_reclaim irq_context: 0 clients_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 clients_rwsem clients.xa_lock irq_context: 0 devices_rwsem irq_context: 0 clients_rwsem clients.xa_lock pool_lock#2 irq_context: 0 (blocking_lsm_notifier_chain).rwsem irq_context: 0 (inetaddr_chain).rwsem irq_context: 0 inet6addr_chain.lock irq_context: 0 buses_mutex irq_context: 0 offload_lock irq_context: 0 inetsw_lock irq_context: 0 (wq_completion)events_power_efficient irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->managed_work)->work) irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->managed_work)->work) &tbl->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->managed_work)->work) &tbl->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->managed_work)->work) &tbl->lock &base->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->managed_work)->work) &tbl->lock &base->lock &obj_hash[i].lock irq_context: 0 ptype_lock irq_context: 0 pernet_ops_rwsem &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem &zone->lock irq_context: 0 pernet_ops_rwsem nl_table_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex batched_entropy_u32.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &tbl->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock irq_context: 0 rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work &base->lock irq_context: 0 
(wq_completion)events_power_efficient (check_lifetime_work).work &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &net->rules_mod_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem slab_mutex irq_context: 0 pernet_ops_rwsem slab_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem slab_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem slab_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem slab_mutex &c->lock irq_context: 0 pernet_ops_rwsem slab_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem slab_mutex pcpu_alloc_mutex irq_context: 0 pernet_ops_rwsem slab_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem batched_entropy_u32.lock irq_context: 0 tcp_ulp_list_lock irq_context: 0 xfrm_state_afinfo_lock irq_context: 0 xfrm_policy_afinfo_lock irq_context: 0 xfrm_input_afinfo_lock irq_context: 0 pernet_ops_rwsem percpu_counters_lock irq_context: 0 rtnl_mutex krc.lock irq_context: 0 rtnl_mutex krc.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex krc.lock hrtimer_bases.lock irq_context: 0 rtnl_mutex krc.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 rtnl_mutex krc.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex krc.lock &base->lock irq_context: 0 rtnl_mutex krc.lock &base->lock &obj_hash[i].lock irq_context: hardirq rcu_read_lock &pool->lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_highpri irq_context: 0 (wq_completion)events_highpri (work_completion)(&(&krcp->page_cache_work)->work) irq_context: 0 (wq_completion)events_highpri (work_completion)(&(&krcp->page_cache_work)->work) fs_reclaim irq_context: 0 (wq_completion)events_highpri (work_completion)(&(&krcp->page_cache_work)->work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_highpri (work_completion)(&(&krcp->page_cache_work)->work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_highpri (work_completion)(&(&krcp->page_cache_work)->work) &zone->lock irq_context: 0 (wq_completion)events_highpri (work_completion)(&(&krcp->page_cache_work)->work) &____s->seqcount irq_context: 0 (wq_completion)events_highpri (work_completion)(&(&krcp->page_cache_work)->work) pool_lock#2 irq_context: 0 (wq_completion)events_highpri (work_completion)(&(&krcp->page_cache_work)->work) krc.lock irq_context: 0 &hashinfo->lock irq_context: 0 tcp_cong_list_lock irq_context: 0 slab_mutex rcu_read_lock pool_lock#2 irq_context: 0 slab_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem cache_list_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rcu_node_0 irq_context: 0 pernet_ops_rwsem &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) 
cache_list_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) &base->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_node_0 irq_context: 0 (rpc_pipefs_notifier_list).rwsem irq_context: 0 svc_xprt_class_lock irq_context: 0 xprt_list_lock irq_context: 0 xprt_list_lock (console_sem).lock irq_context: 0 xprt_list_lock console_lock console_srcu console_owner_lock irq_context: 0 xprt_list_lock console_lock console_srcu console_owner irq_context: 0 xprt_list_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 xprt_list_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 &(&priv->bus_notifier)->rwsem irq_context: 0 pcibios_fwaddrmap_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &c->lock irq_context: 0 umhelper_sem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &zone->lock irq_context: 0 umhelper_sem usermodehelper_disabled_waitq.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock init_fs.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &sb->s_type->i_mutex_key irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &sb->s_type->i_mutex_key fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &sb->s_type->i_mutex_key fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &sb->s_type->i_mutex_key pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &sb->s_type->i_mutex_key &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &sb->s_type->i_mutex_key rcu_read_lock rename_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &sb->s_type->i_mutex_key &dentry->d_lock &wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 mount_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 rename_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 
&sb->s_type->i_mutex_key/1 fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_log_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_log_wait.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_policy_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 &sb->s_type->i_lock_key#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 &s->s_inode_list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 
&sb->s_type->i_mutex_key/1 tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 &sb->s_type->i_lock_key#2 &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss tomoyo_log_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss tomoyo_log_wait.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss tomoyo_policy_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key &sb->s_type->i_lock_key#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 
&sb->s_type->i_mutex_key &wb->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key &wb->list_lock &sb->s_type->i_lock_key#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock &sb->s_type->i_lock_key#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) quarantine_lock irq_context: 0 &drv->dynids.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &sighand->siglock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) free_vmap_area_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) vmap_area_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) init_mm.page_table_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) batched_entropy_u64.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) init_files.file_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) init_fs.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) init_fs.lock &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &p->alloc_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) lock pidmap_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) pidmap_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem css_set_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem tasklist_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) 
cgroup_threadgroup_rwsem tasklist_lock &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem tasklist_lock &sighand->siglock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem css_set_lock &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) input_pool.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&tcp_orphan_timer) irq_context: softirq (&tcp_orphan_timer) &obj_hash[i].lock irq_context: softirq (&tcp_orphan_timer) &base->lock irq_context: softirq (&tcp_orphan_timer) &base->lock &obj_hash[i].lock irq_context: 0 &pool->lock/1 &base->lock irq_context: 0 &pool->lock/1 &base->lock &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &p->pi_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &x->wait#6 irq_context: 0 cpu_hotplug_lock cpuhp_state-up fs_reclaim irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock cpuhp_state-up &c->lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &n->list_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &pcp->lock &zone->lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &zone->lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &____s->seqcount irq_context: 0 umh_sysctl_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up pool_lock#2 irq_context: 0 &mm->mmap_lock irq_context: 0 &mm->mmap_lock fs_reclaim irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock &pcp->lock &zone->lock irq_context: 0 &mm->mmap_lock &zone->lock irq_context: 0 &mm->mmap_lock &____s->seqcount irq_context: 0 &mm->mmap_lock pool_lock#2 irq_context: 0 &mm->mmap_lock &c->lock irq_context: 0 &mm->mmap_lock fs_reclaim irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock &pcp->lock &zone->lock irq_context: 0 &mm->mmap_lock &zone->lock irq_context: 0 &mm->mmap_lock &____s->seqcount irq_context: 0 &mm->mmap_lock pool_lock#2 irq_context: 0 &mm->mmap_lock &mm->page_table_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page) irq_context: 0 &mm->mmap_lock &c->lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &mm->page_table_lock irq_context: 0 &mm->mmap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &mm->mmap_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 lock#4 irq_context: 0 &sig->cred_guard_mutex irq_context: 0 &sig->cred_guard_mutex fs_reclaim irq_context: 0 &sig->cred_guard_mutex fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &fs->lock irq_context: 0 &sig->cred_guard_mutex &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex &zone->lock irq_context: 0 &sig->cred_guard_mutex &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &c->lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &____s->seqcount#3 irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key fs_reclaim irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key rcu_read_lock rename_lock.seqcount irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &dentry->d_lock &wq irq_context: 0 &sig->cred_guard_mutex &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex delayed_uprobe_lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock lock#4 irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock lock#4 &lruvec->lru_lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock lock#5 irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &vma->vm_lock->lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &anon_vma->rwsem irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock lock#4 irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock lock#5 irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &lruvec->lru_lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex pgd_lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex key irq_context: 0 &sig->cred_guard_mutex pcpu_lock irq_context: 0 &sig->cred_guard_mutex percpu_counters_lock irq_context: 0 &tsk->futex_exit_mutex irq_context: 0 &tsk->futex_exit_mutex &p->pi_lock irq_context: 0 &p->alloc_lock &fs->lock irq_context: 0 &child->perf_event_mutex irq_context: 0 css_set_lock irq_context: 0 tasklist_lock irq_context: 0 tasklist_lock &pid->wait_pidfd irq_context: 0 tasklist_lock &sighand->siglock irq_context: 0 tasklist_lock &sighand->siglock &sig->wait_chldexit irq_context: 0 tasklist_lock &sighand->siglock input_pool.lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 &obj_hash[i].lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pool_lock#2 irq_context: 0 tasklist_lock &obj_hash[i].lock irq_context: 0 low_water_lock irq_context: 0 low_water_lock 
(console_sem).lock irq_context: 0 low_water_lock console_lock console_srcu console_owner_lock irq_context: 0 low_water_lock console_lock console_srcu console_owner irq_context: 0 low_water_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 low_water_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 &cfs_rq->removed.lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &cfs_rq->removed.lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex pool_lock#2 irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 vendor_module_lock irq_context: 0 vendor_module_lock slab_mutex irq_context: 0 vendor_module_lock slab_mutex fs_reclaim irq_context: 0 vendor_module_lock slab_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 vendor_module_lock slab_mutex &pcp->lock &zone->lock irq_context: 0 vendor_module_lock slab_mutex &zone->lock irq_context: 0 vendor_module_lock slab_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 vendor_module_lock slab_mutex &____s->seqcount irq_context: 0 vendor_module_lock slab_mutex pool_lock#2 irq_context: 0 vendor_module_lock slab_mutex rcu_read_lock pool_lock#2 irq_context: 0 vendor_module_lock slab_mutex &obj_hash[i].lock irq_context: 0 vendor_module_lock slab_mutex &c->lock irq_context: 0 vendor_module_lock slab_mutex &n->list_lock irq_context: 0 vendor_module_lock slab_mutex pcpu_alloc_mutex irq_context: 0 vendor_module_lock slab_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 vendor_module_lock pcpu_alloc_mutex irq_context: 0 vendor_module_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 vendor_module_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 vendor_module_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 vendor_module_lock &obj_hash[i].lock irq_context: 0 vendor_module_lock percpu_counters_lock irq_context: 0 vendor_module_lock fs_reclaim irq_context: 0 vendor_module_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 vendor_module_lock pool_lock#2 irq_context: 0 vendor_module_lock shrinker_rwsem irq_context: 0 vendor_module_lock &pcp->lock &zone->lock irq_context: 0 vendor_module_lock &zone->lock irq_context: 0 vendor_module_lock &____s->seqcount irq_context: 0 vendor_module_lock cpu_hotplug_lock irq_context: 0 vendor_module_lock cpu_hotplug_lock static_call_mutex irq_context: 0 vendor_module_lock cpu_hotplug_lock static_call_mutex text_mutex irq_context: 0 vendor_module_lock cpu_hotplug_lock static_call_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 vendor_module_lock cpu_hotplug_lock static_call_mutex text_mutex &rq->__lock irq_context: softirq &(&cache_cleaner)->timer irq_context: softirq &(&cache_cleaner)->timer rcu_read_lock &pool->lock irq_context: softirq &(&cache_cleaner)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&cache_cleaner)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&cache_cleaner)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&cache_cleaner)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vendor_module_lock timekeeper_lock irq_context: 0 vendor_module_lock timekeeper_lock pvclock_gtod_data irq_context: 0 misc_mtx &pcp->lock &zone->lock &____s->seqcount irq_context: 0 misc_mtx rcu_read_lock &pool->lock/1 irq_context: 0 misc_mtx rcu_read_lock 
&pool->lock/1 &obj_hash[i].lock irq_context: 0 misc_mtx rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 misc_mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 misc_mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 misc_mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: hardirq timekeeper_lock tk_core.seq.seqcount pvclock_gtod_data irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) batched_entropy_u64.lock crngs.lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &x->wait#9 irq_context: 0 cpu_hotplug_lock cpuhp_state-up &obj_hash[i].lock pool_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &k->list_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex &k->list_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex fs_reclaim irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex pool_lock#2 irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex lock kernfs_idr_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex &root->kernfs_rwsem irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cpu_hotplug_lock cpuhp_state-up lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up lock kernfs_idr_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &root->kernfs_rwsem irq_context: 0 cpu_hotplug_lock cpuhp_state-up &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cpu_hotplug_lock cpuhp_state-up bus_type_sem irq_context: 0 cpu_hotplug_lock cpuhp_state-up sysfs_symlink_target_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &root->kernfs_rwsem irq_context: 0 cpu_hotplug_lock cpuhp_state-up &dev->power.lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up dpm_list_mtx irq_context: 0 cpu_hotplug_lock cpuhp_state-up req_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &p->pi_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 cpu_hotplug_lock cpuhp_state-up &x->wait#11 irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#2 irq_context: 0 &type->i_mutex_dir_key#2 fs_reclaim irq_context: 0 &type->i_mutex_dir_key#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#2 pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#2 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#2 rcu_read_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#2 &dentry->d_lock &wq irq_context: 0 rcu_read_lock &sb->s_type->i_lock_key#5 irq_context: 0 &mm->mmap_lock &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up uevent_sock_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_read_lock &pool->lock/1 irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_read_lock &pool->lock/1 
&p->pi_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_node_0 irq_context: 0 cpu_hotplug_lock cpuhp_state-up running_helpers_waitq.lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_read_lock pool_lock#2 irq_context: 0 cpu_hotplug_lock cpuhp_state-up subsys mutex#24 irq_context: 0 cpu_hotplug_lock cpuhp_state-up subsys mutex#24 &k->k_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex &c->lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex &pcp->lock &zone->lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex &zone->lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex &____s->seqcount irq_context: 0 &sig->cred_guard_mutex rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up subsys mutex#25 irq_context: 0 cpu_hotplug_lock cpuhp_state-up subsys mutex#25 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sig->cred_guard_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override &zone->lock irq_context: 0 &sig->cred_guard_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) lock pidmap_lock &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) lock pidmap_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) lock pidmap_lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) lock pidmap_lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) lock pidmap_lock pool_lock#2 irq_context: 0 crypto_alg_sem irq_context: 0 &journal->j_wait_done_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#28/1 &sbi->s_error_lock irq_context: 0 &type->s_umount_key#28/1 &base->lock irq_context: 0 crypto_alg_sem &rq->__lock irq_context: 0 pm_qos_lock irq_context: 0 subsys mutex#26 irq_context: 0 subsys mutex#27 irq_context: 0 subsys mutex#27 &k->list_lock 
irq_context: 0 subsys mutex#27 &k->k_lock irq_context: 0 rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 subsys mutex#28 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) batched_entropy_u8.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) kfence_freelist_lock irq_context: 0 tasklist_lock &c->lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &c->lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &zone->lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &rq->__lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock &obj_hash[i].lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock pool_lock#2 irq_context: softirq rcu_callback rcu_read_lock &pool->lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 trace_event_sem trace_event_sem.wait_lock irq_context: 0 trace_event_sem &rq->__lock irq_context: 0 trace_event_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&p->wq) irq_context: 0 (wq_completion)events (work_completion)(&p->wq) vmap_area_lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) purge_vmap_area_lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&p->wq) pool_lock#2 irq_context: softirq rcu_callback &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &obj_hash[i].lock pool_lock irq_context: softirq rcu_callback &meta->lock irq_context: softirq rcu_callback kfence_freelist_lock irq_context: softirq &(&group->avgs_work)->timer irq_context: softirq &(&group->avgs_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&group->avgs_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&group->avgs_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&group->avgs_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&group->avgs_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->timer irq_context: softirq &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long 
__ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->timer rcu_read_lock &pool->lock irq_context: softirq &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq mm/memcontrol.c:589 irq_context: softirq mm/memcontrol.c:589 rcu_read_lock &pool->lock/1 irq_context: softirq mm/memcontrol.c:589 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq mm/memcontrol.c:589 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: hardirq &rq->__lock &obj_hash[i].lock irq_context: hardirq &rq->__lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) irq_context: hardirq &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (stats_flush_dwork).work irq_context: 0 (wq_completion)events_unbound (stats_flush_dwork).work cgroup_rstat_lock irq_context: 0 (wq_completion)events_unbound (stats_flush_dwork).work cgroup_rstat_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)events_unbound (stats_flush_dwork).work &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (stats_flush_dwork).work &base->lock irq_context: 0 (wq_completion)events_unbound (stats_flush_dwork).work &base->lock &obj_hash[i].lock irq_context: softirq &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq mm/memcontrol.c:589 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq mm/memcontrol.c:589 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events 
(work_completion)(&(&group->avgs_work)->work) &group->avgs_lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock &base->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#28/1 &base->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#28/1 &fq->mq_flush_lock irq_context: 0 &type->s_umount_key#28/1 &fq->mq_flush_lock &q->requeue_lock irq_context: 0 &type->s_umount_key#28/1 &fq->mq_flush_lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#28/1 &fq->mq_flush_lock rcu_read_lock &pool->lock irq_context: 0 &type->s_umount_key#28/1 &fq->mq_flush_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#28/1 &fq->mq_flush_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &type->s_umount_key#28/1 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->s_umount_key#28/1 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#28/1 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) &q->requeue_lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) &hctx->lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock &hctx->lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &journal->j_state_lock irq_context: 0 &journal->j_state_lock &journal->j_wait_done_commit irq_context: 0 &journal->j_state_lock &journal->j_wait_commit irq_context: softirq &fq->mq_flush_lock irq_context: softirq &fq->mq_flush_lock tk_core.seq.seqcount irq_context: softirq &fq->mq_flush_lock &q->requeue_lock irq_context: softirq &fq->mq_flush_lock &obj_hash[i].lock irq_context: softirq &fq->mq_flush_lock rcu_read_lock &pool->lock irq_context: softirq &fq->mq_flush_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &fq->mq_flush_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &fq->mq_flush_lock bit_wait_table + i irq_context: softirq &fq->mq_flush_lock bit_wait_table + i &p->pi_lock irq_context: softirq &fq->mq_flush_lock bit_wait_table + i &p->pi_lock &rq->__lock irq_context: softirq &fq->mq_flush_lock bit_wait_table + i &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#28/1 &journal->j_state_lock irq_context: 0 &type->s_umount_key#28/1 &p->alloc_lock irq_context: 0 &type->s_umount_key#28/1 cpu_hotplug_lock irq_context: 0 &type->s_umount_key#28/1 cpu_hotplug_lock wq_pool_mutex irq_context: 0 &type->s_umount_key#28/1 cpu_hotplug_lock wq_pool_mutex fs_reclaim irq_context: 0 &type->s_umount_key#28/1 cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start 
irq_context: 0 &type->s_umount_key#28/1 cpu_hotplug_lock wq_pool_mutex pool_lock#2 irq_context: 0 &type->s_umount_key#28/1 cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock irq_context: 0 &type->s_umount_key#28/1 cpu_hotplug_lock wq_pool_mutex &wq->mutex irq_context: 0 &type->s_umount_key#28/1 cpu_hotplug_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 &type->s_umount_key#28/1 wq_pool_mutex irq_context: 0 &type->s_umount_key#28/1 wq_pool_mutex &wq->mutex irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_lock_key#22 &dentry->d_lock irq_context: 0 pmus_lock fs_reclaim irq_context: 0 pmus_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pmus_lock &k->list_lock irq_context: 0 pmus_lock lock irq_context: 0 pmus_lock lock kernfs_idr_lock irq_context: 0 pmus_lock &root->kernfs_rwsem irq_context: 0 pmus_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 pmus_lock uevent_sock_mutex irq_context: 0 pmus_lock rcu_read_lock &pool->lock/1 irq_context: 0 pmus_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 pmus_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 pmus_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 pmus_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 pmus_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pmus_lock running_helpers_waitq.lock irq_context: 0 pmus_lock &c->lock irq_context: 0 &type->s_umount_key#28/1 &ei->i_es_lock irq_context: 0 pmus_lock &____s->seqcount irq_context: 0 &type->s_umount_key#28/1 ext4_grpinfo_slab_create_mutex irq_context: 0 &type->s_umount_key#28/1 ext4_grpinfo_slab_create_mutex slab_mutex irq_context: 0 &type->s_umount_key#28/1 ext4_grpinfo_slab_create_mutex slab_mutex fs_reclaim irq_context: 0 pmus_lock &x->wait#9 irq_context: 0 pmus_lock &pcp->lock &zone->lock irq_context: 0 pmus_lock &zone->lock irq_context: 0 pmus_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#28/1 ext4_grpinfo_slab_create_mutex slab_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#28/1 ext4_grpinfo_slab_create_mutex slab_mutex pool_lock#2 irq_context: 0 pmus_lock rcu_read_lock pool_lock#2 irq_context: 0 pmus_lock bus_type_sem irq_context: 0 pmus_lock sysfs_symlink_target_lock irq_context: 0 pmus_lock &k->k_lock irq_context: 0 pmus_lock &root->kernfs_rwsem irq_context: 0 pmus_lock &dev->power.lock irq_context: 0 pmus_lock dpm_list_mtx irq_context: 0 pmus_lock &dev->mutex &k->list_lock irq_context: 0 pmus_lock &dev->mutex &k->k_lock irq_context: 0 pmus_lock &dev->mutex &dev->power.lock irq_context: 0 pmus_lock subsys mutex#29 irq_context: 0 pmus_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 &type->s_umount_key#28/1 ext4_grpinfo_slab_create_mutex slab_mutex &c->lock irq_context: 0 &type->s_umount_key#28/1 ext4_grpinfo_slab_create_mutex slab_mutex &n->list_lock irq_context: 0 &type->s_umount_key#28/1 ext4_grpinfo_slab_create_mutex slab_mutex &____s->seqcount irq_context: 0 &type->s_umount_key#28/1 ext4_grpinfo_slab_create_mutex slab_mutex pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#28/1 ext4_grpinfo_slab_create_mutex slab_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#28/1 ext4_grpinfo_slab_create_mutex slab_mutex &root->kernfs_rwsem irq_context: 0 &type->s_umount_key#28/1 ext4_grpinfo_slab_create_mutex slab_mutex &k->list_lock irq_context: 0 &type->s_umount_key#28/1 ext4_grpinfo_slab_create_mutex slab_mutex lock 
irq_context: 0 &type->s_umount_key#28/1 ext4_grpinfo_slab_create_mutex slab_mutex lock kernfs_idr_lock irq_context: 0 &type->s_umount_key#28/1 ext4_grpinfo_slab_create_mutex slab_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &type->s_umount_key#28/1 &obj_hash[i].lock pool_lock irq_context: 0 &type->s_umount_key#28/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#28/1 ext4_li_mtx irq_context: 0 &type->s_umount_key#28/1 lock irq_context: 0 &type->s_umount_key#28/1 lock kernfs_idr_lock irq_context: 0 &type->s_umount_key#28/1 &root->kernfs_rwsem irq_context: 0 &type->s_umount_key#28/1 &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &type->s_umount_key#28/1 lock kernfs_idr_lock pool_lock#2 irq_context: 0 &type->s_umount_key#28/1 (console_sem).lock irq_context: 0 &type->s_umount_key#28/1 console_lock console_srcu console_owner_lock irq_context: 0 key_user_lock irq_context: 0 crngs.lock base_crng.lock irq_context: 0 key_serial_lock irq_context: 0 key_construction_mutex irq_context: 0 &type->lock_class irq_context: 0 &type->lock_class keyring_serialise_link_lock irq_context: 0 &type->lock_class keyring_serialise_link_lock fs_reclaim irq_context: 0 &type->lock_class keyring_serialise_link_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->lock_class keyring_serialise_link_lock pool_lock#2 irq_context: 0 &type->lock_class keyring_serialise_link_lock &c->lock irq_context: 0 &type->lock_class keyring_serialise_link_lock &____s->seqcount irq_context: 0 &type->lock_class keyring_serialise_link_lock &obj_hash[i].lock irq_context: 0 keyring_serialise_link_lock irq_context: 0 &pgdat->kswapd_lock fs_reclaim irq_context: 0 &pgdat->kswapd_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &pgdat->kswapd_lock pool_lock#2 irq_context: 0 &pgdat->kswapd_lock kthread_create_lock irq_context: 0 &pgdat->kswapd_lock &p->pi_lock irq_context: 0 &pgdat->kswapd_lock &p->pi_lock &rq->__lock irq_context: 0 &pgdat->kswapd_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pgdat->kswapd_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key rcu_read_lock &dentry->d_lock irq_context: 0 &pgdat->kswapd_lock &cfs_rq->removed.lock irq_context: 0 &pgdat->kswapd_lock &obj_hash[i].lock irq_context: 0 &pgdat->kswapd_lock &x->wait irq_context: 0 &pgdat->kswapd_lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &pgdat->kswapd_wait irq_context: 0 &pgdat->kswapd_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &x->wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#3 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 list_lrus_mutex irq_context: 0 drivers_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex fs_reclaim irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 damon_dbgfs_lock irq_context: 0 damon_dbgfs_lock fs_reclaim irq_context: 0 damon_dbgfs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 damon_dbgfs_lock &c->lock irq_context: 0 damon_dbgfs_lock &____s->seqcount irq_context: 0 damon_dbgfs_lock pool_lock#2 irq_context: 0 damon_dbgfs_lock tk_core.seq.seqcount irq_context: 0 damon_dbgfs_lock damon_ops_lock irq_context: 0 damon_dbgfs_lock pin_fs_lock irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 rename_lock.seqcount 
irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 &type->s_umount_key#21/1 irq_context: 0 &type->s_umount_key#21/1 &rq->__lock irq_context: 0 &type->s_umount_key#21/1 fs_reclaim irq_context: 0 &type->s_umount_key#21/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#21/1 pool_lock#2 irq_context: 0 &type->s_umount_key#21/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#21/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#21/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#21/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#21/1 sb_lock irq_context: 0 &type->s_umount_key#21/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#21/1 &c->lock irq_context: 0 &type->s_umount_key#21/1 &____s->seqcount irq_context: 0 &type->s_umount_key#21/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#21/1 &sb->s_type->i_lock_key#18 irq_context: 0 &type->s_umount_key#21/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#21/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#21/1 &sb->s_type->i_lock_key#18 &dentry->d_lock irq_context: 0 &type->s_umount_key#21/1 &dentry->d_lock irq_context: 0 dq_list_lock irq_context: 0 &type->s_umount_key#22/1 irq_context: 0 &type->s_umount_key#22/1 fs_reclaim irq_context: 0 &type->s_umount_key#22/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#22/1 pool_lock#2 irq_context: 0 &type->s_umount_key#22/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#22/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#22/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#22/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#22/1 sb_lock irq_context: 0 &type->s_umount_key#22/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#22/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#22/1 &sb->s_type->i_lock_key#19 irq_context: 0 &type->s_umount_key#22/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#22/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#22/1 &sb->s_type->i_lock_key#19 &dentry->d_lock irq_context: 0 &type->s_umount_key#22/1 &dentry->d_lock irq_context: 0 configfs_subsystem_mutex irq_context: 0 &sb->s_type->i_mutex_key#7/1 irq_context: 0 &sb->s_type->i_mutex_key#7/1 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#7/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 pool_lock#2 irq_context: 0 
&sb->s_type->i_mutex_key#7/1 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#7/1 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 configfs_dirent_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &sb->s_type->i_lock_key#19 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &s->s_inode_list_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#7/1 &sb->s_type->i_lock_key#19 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 irq_context: 0 misc_mtx &obj_hash[i].lock pool_lock irq_context: 0 &sig->cred_guard_mutex &rq->__lock irq_context: 0 &ecryptfs_kthread_ctl.wait irq_context: 0 ecryptfs_daemon_hash_mux irq_context: 0 ecryptfs_daemon_hash_mux fs_reclaim irq_context: 0 ecryptfs_daemon_hash_mux fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 ecryptfs_daemon_hash_mux pool_lock#2 irq_context: 0 ecryptfs_msg_ctx_lists_mux irq_context: 0 ecryptfs_msg_ctx_lists_mux &ecryptfs_msg_ctx_arr[i].mux irq_context: 0 pernet_ops_rwsem tk_core.seq.seqcount irq_context: 0 pernet_ops_rwsem &k->list_lock irq_context: 0 pernet_ops_rwsem lock irq_context: 0 pernet_ops_rwsem lock kernfs_idr_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 pernet_ops_rwsem uevent_sock_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem uevent_sock_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem uevent_sock_mutex nl_table_lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex nl_table_wait.lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem running_helpers_waitq.lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &rq->__lock irq_context: 0 nfs_version_lock irq_context: softirq (&timer.timer) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 key_types_sem irq_context: 0 key_types_sem (console_sem).lock irq_context: 0 key_types_sem console_lock console_srcu console_owner_lock irq_context: 0 key_types_sem console_lock console_srcu console_owner irq_context: 0 key_types_sem console_lock console_srcu console_owner &port_lock_key irq_context: 0 key_types_sem console_lock console_srcu console_owner console_owner_lock irq_context: 0 pnfs_spinlock irq_context: 0 pernet_ops_rwsem &sn->pipefs_sb_lock irq_context: 0 pernet_ops_rwsem krc.lock irq_context: 0 pernet_ops_rwsem &s->s_inode_list_lock irq_context: 0 nls_lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex rcu_read_lock pool_lock#2 irq_context: softirq rcu_callback quarantine_lock irq_context: 0 jffs2_compressor_list_lock irq_context: 0 misc_mtx rcu_read_lock pool_lock#2 
irq_context: 0 next_tag_value_lock irq_context: 0 log_redrive_lock irq_context: 0 &x->wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &TxAnchor.LazyLock irq_context: 0 &TxAnchor.LazyLock jfs_commit_thread_wait.lock irq_context: 0 jfsTxnLock irq_context: softirq &(&kfence_timer)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 slab_mutex batched_entropy_u8.lock irq_context: 0 slab_mutex batched_entropy_u8.lock crngs.lock irq_context: 0 slab_mutex kfence_freelist_lock irq_context: 0 ocfs2_stack_lock irq_context: 0 ocfs2_stack_lock (console_sem).lock irq_context: 0 ocfs2_stack_lock console_lock console_srcu console_owner_lock irq_context: 0 ocfs2_stack_lock console_lock console_srcu console_owner irq_context: 0 ocfs2_stack_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 ocfs2_stack_lock console_lock console_srcu console_owner console_owner_lock irq_context: softirq rcu_callback fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq rcu_callback fill_pool_map-wait-type-override pool_lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq rcu_callback rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: softirq rcu_callback fill_pool_map-wait-type-override &c->lock irq_context: softirq rcu_callback fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: softirq rcu_callback fill_pool_map-wait-type-override &zone->lock irq_context: softirq rcu_callback fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 o2hb_callback_sem irq_context: 0 o2net_handler_lock irq_context: 0 &type->s_umount_key#23/1 irq_context: 0 &type->s_umount_key#23/1 fs_reclaim irq_context: 0 &type->s_umount_key#23/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#23/1 pool_lock#2 irq_context: 0 &type->s_umount_key#23/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#23/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#23/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#23/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#23/1 sb_lock irq_context: 0 &type->s_umount_key#23/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#23/1 &____s->seqcount irq_context: 0 &type->s_umount_key#23/1 &c->lock irq_context: 0 &type->s_umount_key#23/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#23/1 &sb->s_type->i_lock_key#20 irq_context: 0 &type->s_umount_key#23/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#23/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#23/1 &sb->s_type->i_lock_key#20 &dentry->d_lock irq_context: 0 &type->s_umount_key#23/1 &dentry->d_lock irq_context: 0 pernet_ops_rwsem nf_hook_mutex irq_context: 0 pernet_ops_rwsem nf_hook_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem nf_hook_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem nf_hook_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_hook_mutex &c->lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock jump_label_mutex irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem 
fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 alg_types_sem irq_context: 0 alg_types_sem fs_reclaim irq_context: 0 alg_types_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 alg_types_sem pool_lock#2 irq_context: 0 dma_list_mutex irq_context: 0 asymmetric_key_parsers_sem irq_context: 0 asymmetric_key_parsers_sem (console_sem).lock irq_context: 0 asymmetric_key_parsers_sem console_lock console_srcu console_owner_lock irq_context: 0 asymmetric_key_parsers_sem console_lock console_srcu console_owner irq_context: 0 asymmetric_key_parsers_sem console_lock console_srcu console_owner &port_lock_key irq_context: 0 asymmetric_key_parsers_sem console_lock console_srcu console_owner console_owner_lock irq_context: 0 key_types_sem &rq->__lock irq_context: 0 blkcg_pol_register_mutex irq_context: 0 blkcg_pol_register_mutex blkcg_pol_mutex irq_context: 0 blkcg_pol_register_mutex cgroup_mutex irq_context: 0 blkcg_pol_register_mutex cgroup_mutex &root->kernfs_rwsem irq_context: 0 blkcg_pol_register_mutex blkcg_pol_mutex fs_reclaim irq_context: 0 blkcg_pol_register_mutex blkcg_pol_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 blkcg_pol_register_mutex blkcg_pol_mutex pool_lock#2 irq_context: 0 blkcg_pol_register_mutex cgroup_mutex fs_reclaim irq_context: 0 blkcg_pol_register_mutex cgroup_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 blkcg_pol_register_mutex cgroup_mutex pool_lock#2 irq_context: 0 blkcg_pol_register_mutex cgroup_mutex lock irq_context: 0 blkcg_pol_register_mutex cgroup_mutex lock kernfs_idr_lock irq_context: 0 blkcg_pol_register_mutex cgroup_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 elv_list_lock irq_context: 0 crc_t10dif_mutex irq_context: 0 crc_t10dif_mutex crypto_alg_sem irq_context: 0 crc_t10dif_mutex fs_reclaim irq_context: 0 crc_t10dif_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 crc_t10dif_mutex pool_lock#2 irq_context: 0 crc64_rocksoft_mutex irq_context: 0 crc64_rocksoft_mutex crypto_alg_sem irq_context: 0 crc64_rocksoft_mutex fs_reclaim irq_context: 0 crc64_rocksoft_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 crc64_rocksoft_mutex pool_lock#2 irq_context: 0 ts_mod_lock irq_context: 0 &dev->mutex device_links_srcu irq_context: 0 &dev->mutex fwnode_link_lock irq_context: 0 &dev->mutex device_links_lock irq_context: 0 &dev->mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex fs_reclaim irq_context: 0 &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex pool_lock#2 irq_context: 0 &dev->mutex lock irq_context: 0 &dev->mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex &x->wait#9 irq_context: 0 &dev->mutex &obj_hash[i].lock irq_context: 0 &dev->mutex gdp_mutex irq_context: 0 &dev->mutex gdp_mutex &k->list_lock irq_context: 0 &dev->mutex gdp_mutex fs_reclaim irq_context: 0 &dev->mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex gdp_mutex &c->lock irq_context: 0 &dev->mutex gdp_mutex &____s->seqcount irq_context: 0 &dev->mutex gdp_mutex pool_lock#2 irq_context: 0 &dev->mutex gdp_mutex lock irq_context: 0 &dev->mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex bus_type_sem irq_context: 0 
&dev->mutex lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex lock kernfs_idr_lock &zone->lock irq_context: 0 &dev->mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 &dev->mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex lock kernfs_idr_lock rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex lock kernfs_idr_lock &obj_hash[i].lock irq_context: 0 &dev->mutex &c->lock irq_context: 0 &dev->mutex &____s->seqcount irq_context: 0 &dev->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex dpm_list_mtx irq_context: 0 &dev->mutex &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &obj_hash[i].lock pool_lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &dentry->d_lock &dentry->d_lock/1 irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &dentry->d_lock/1 irq_context: 0 rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex &cfs_rq->removed.lock irq_context: 0 &dev->mutex uevent_sock_mutex irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex subsys mutex#30 irq_context: 0 &dev->mutex subsys mutex#30 &k->k_lock irq_context: 0 &dev->mutex (console_sem).lock irq_context: 0 &dev->mutex console_lock console_srcu console_owner_lock irq_context: 0 &dev->mutex console_lock console_srcu console_owner irq_context: 0 &dev->mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 &dev->mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->mutex input_mutex irq_context: 0 &dev->mutex input_mutex fs_reclaim irq_context: 0 &dev->mutex input_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex input_mutex &c->lock irq_context: 0 &dev->mutex input_mutex &____s->seqcount irq_context: 0 &dev->mutex input_mutex pool_lock#2 irq_context: 0 &dev->mutex input_mutex &dev->mutex#2 irq_context: 0 &dev->mutex input_mutex input_devices_poll_wait.lock irq_context: 0 &dev->mutex wakeup_ida.xa_lock irq_context: 0 &dev->mutex &obj_hash[i].lock pool_lock irq_context: 0 &sig->cred_guard_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 &sig->cred_guard_mutex fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex fill_pool_map-wait-type-override &zone->lock irq_context: 0 &sig->cred_guard_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &dev->mutex subsys mutex#15 irq_context: 0 &dev->mutex subsys mutex#15 &k->k_lock irq_context: 0 &dev->mutex events_lock irq_context: 0 &dev->mutex wakeup_srcu_srcu_usage.lock irq_context: 0 &dev->mutex wakeup_srcu_srcu_usage.lock &obj_hash[i].lock irq_context: 0 &dev->mutex &ACCESS_PRIVATE(sdp, lock) irq_context: 0 &dev->mutex wakeup_srcu irq_context: 0 &dev->mutex 
wakeup_srcu_srcu_usage.lock &ACCESS_PRIVATE(sdp, lock) irq_context: 0 &dev->mutex wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock irq_context: 0 &dev->mutex wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &dev->mutex wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &dev->mutex wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex wakeup_srcu_srcu_usage.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) wakeup_srcu_srcu_usage.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &zone->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &dev->mutex &x->wait#3 irq_context: 0 &dev->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &ssp->srcu_sup->srcu_cb_mutex wakeup_srcu_srcu_usage.lock irq_context: 0 &dev->mutex (&ws->timer) irq_context: 0 &dev->mutex &base->lock irq_context: 0 &dev->mutex &k->k_lock klist_remove_lock irq_context: 0 &dev->mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 &dev->mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 &dev->mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 &dev->mutex subsys mutex#15 &k->k_lock klist_remove_lock irq_context: 0 &dev->mutex deferred_probe_mutex irq_context: 0 &dev->mutex mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) lock pidmap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pool_lock irq_context: 0 &dev->mutex kernfs_idr_lock irq_context: 0 &dev->mutex &ws->lock irq_context: 0 &dev->mutex deleted_ws.lock irq_context: 0 &dev->mutex semaphore->lock irq_context: 0 &dev->mutex *(&acpi_gbl_hardware_lock) irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex probe_waitqueue.lock irq_context: 0 register_count_mutex irq_context: 0 register_count_mutex &k->list_lock irq_context: 0 register_count_mutex fs_reclaim irq_context: 0 register_count_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 register_count_mutex pool_lock#2 irq_context: 0 register_count_mutex lock irq_context: 0 register_count_mutex lock kernfs_idr_lock irq_context: 0 register_count_mutex &root->kernfs_rwsem 
irq_context: 0 register_count_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 register_count_mutex &k->k_lock irq_context: 0 register_count_mutex uevent_sock_mutex irq_context: 0 register_count_mutex &obj_hash[i].lock irq_context: 0 register_count_mutex rcu_read_lock &pool->lock/1 irq_context: 0 register_count_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 register_count_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 register_count_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 register_count_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 register_count_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 register_count_mutex running_helpers_waitq.lock irq_context: 0 register_count_mutex &rq->__lock irq_context: 0 (cpufreq_policy_notifier_list).rwsem irq_context: 0 &dev->mutex cpu_add_remove_lock irq_context: 0 &dev->mutex tick_broadcast_lock irq_context: 0 &dev->mutex cpuidle_driver_lock irq_context: 0 &dev->mutex cpuidle_lock irq_context: 0 &dev->mutex cpuidle_lock fs_reclaim irq_context: 0 &dev->mutex cpuidle_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex cpuidle_lock pool_lock#2 irq_context: 0 &dev->mutex cpuidle_lock &c->lock irq_context: 0 &dev->mutex cpuidle_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex cpuidle_lock &zone->lock irq_context: 0 &dev->mutex cpuidle_lock &____s->seqcount irq_context: 0 &dev->mutex cpuidle_lock lock irq_context: 0 &dev->mutex cpuidle_lock lock kernfs_idr_lock irq_context: 0 &dev->mutex cpuidle_lock &root->kernfs_rwsem irq_context: 0 &dev->mutex cpuidle_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex cpuidle_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex thermal_cdev_ida.xa_lock irq_context: 0 &dev->mutex cpufreq_driver_lock irq_context: 0 &dev->mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex &zone->lock irq_context: 0 &dev->mutex subsys mutex#31 irq_context: 0 &dev->mutex subsys mutex#31 &k->k_lock irq_context: 0 &dev->mutex thermal_list_lock irq_context: 0 (x86_mce_decoder_chain).rwsem irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem irq_context: 0 &dev->mutex &drv->dynids.lock irq_context: 0 &dev->mutex pci_config_lock irq_context: 0 &dev->mutex *(&acpi_gbl_reference_count_lock) irq_context: 0 &dev->mutex acpi_link_lock irq_context: 0 &dev->mutex acpi_link_lock fs_reclaim irq_context: 0 &dev->mutex acpi_link_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex acpi_link_lock pool_lock#2 irq_context: 0 &dev->mutex acpi_link_lock semaphore->lock irq_context: 0 &dev->mutex acpi_link_lock &obj_hash[i].lock irq_context: 0 &dev->mutex acpi_link_lock &____s->seqcount irq_context: 0 &dev->mutex acpi_link_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex acpi_link_lock &zone->lock irq_context: 0 &dev->mutex acpi_link_lock rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex acpi_link_lock *(&acpi_gbl_reference_count_lock) irq_context: 0 &dev->mutex acpi_link_lock &c->lock irq_context: 0 &dev->mutex acpi_link_lock pci_config_lock irq_context: 0 &dev->mutex acpi_link_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex acpi_link_lock &rq->__lock irq_context: 0 &dev->mutex acpi_link_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &k->list_lock irq_context: 0 
(wq_completion)events_unbound (work_completion)(&entry->work) &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) uevent_sock_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) running_helpers_waitq.lock irq_context: 0 &dev->mutex acpi_link_lock (console_sem).lock irq_context: 0 &dev->mutex acpi_link_lock console_lock console_srcu console_owner_lock irq_context: 0 &dev->mutex acpi_link_lock console_lock console_srcu console_owner irq_context: 0 &dev->mutex acpi_link_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 &dev->mutex acpi_link_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->mutex acpi_ioapic_lock irq_context: 0 &dev->mutex acpi_ioapic_lock ioapic_mutex irq_context: 0 &dev->mutex resource_lock irq_context: 0 &dev->mutex virtio_index_ida.xa_lock irq_context: 0 &dev->mutex &dev->mutex &dev->power.lock irq_context: 0 &dev->mutex &dev->mutex &k->list_lock irq_context: 0 &dev->mutex &dev->mutex &k->k_lock irq_context: 0 &dev->mutex subsys mutex#32 irq_context: 0 &dev->mutex fwnode_link_lock &k->k_lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock quarantine_lock irq_context: 0 &dev->mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex &dev->devres_lock irq_context: 0 &dev->mutex &md->mutex irq_context: 0 &dev->mutex &md->mutex fs_reclaim irq_context: 0 &dev->mutex &md->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &md->mutex pool_lock#2 irq_context: 0 &dev->mutex &md->mutex irq_domain_mutex irq_context: 0 &dev->mutex &md->mutex irq_domain_mutex fs_reclaim irq_context: 0 &dev->mutex &md->mutex irq_domain_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &md->mutex irq_domain_mutex pool_lock#2 irq_context: 0 &dev->mutex &md->mutex irq_domain_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex resource_lock irq_context: 0 &dev->mutex memtype_lock irq_context: 0 &dev->mutex free_vmap_area_lock irq_context: 0 &dev->mutex free_vmap_area_lock &obj_hash[i].lock irq_context: 0 &dev->mutex free_vmap_area_lock pool_lock#2 irq_context: 0 &dev->mutex vmap_area_lock irq_context: 0 &dev->mutex &md->mutex pci_config_lock irq_context: 0 &dev->mutex &md->mutex &xa->xa_lock#4 irq_context: 0 &dev->mutex &md->mutex &xa->xa_lock#4 pool_lock#2 irq_context: 0 &dev->mutex &md->mutex &domain->mutex irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock fs_reclaim irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock pool_lock#2 irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock pcpu_alloc_mutex irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock &obj_hash[i].lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock lock kernfs_idr_lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock &root->kernfs_rwsem irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock &c->lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock &zone->lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock &____s->seqcount irq_context: 0 &dev->mutex &md->mutex &domain->mutex fs_reclaim irq_context: 0 &dev->mutex &md->mutex &domain->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &md->mutex &domain->mutex pool_lock#2 irq_context: 0 &dev->mutex &md->mutex &domain->mutex vector_lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex &irq_desc_lock_class irq_context: 0 &dev->mutex &md->mutex &irq_desc_lock_class irq_context: 0 &dev->mutex &md->mutex vector_lock irq_context: 0 &dev->mutex &md->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex &md->mutex lock irq_context: 0 &dev->mutex &md->mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex &md->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex &md->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex &md->mutex &c->lock irq_context: 0 &dev->mutex &md->mutex &____s->seqcount irq_context: 0 &dev->mutex &desc->request_mutex irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class vector_lock irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class mask_lock irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock vector_lock irq_context: 0 &dev->mutex register_lock irq_context: 0 &dev->mutex register_lock proc_subdir_lock irq_context: 0 &dev->mutex register_lock fs_reclaim irq_context: 0 &dev->mutex register_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex register_lock pool_lock#2 irq_context: 0 &dev->mutex register_lock proc_inum_ida.xa_lock irq_context: 0 &dev->mutex register_lock proc_subdir_lock irq_context: 0 &dev->mutex &irq_desc_lock_class irq_context: 0 &dev->mutex proc_subdir_lock irq_context: 0 &dev->mutex proc_inum_ida.xa_lock irq_context: 0 &dev->mutex proc_subdir_lock irq_context: 0 &dev->mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex &zone->lock &____s->seqcount irq_context: 0 &dev->mutex &dev->vqs_list_lock irq_context: 0 &dev->mutex &vp_dev->lock irq_context: 0 &dev->mutex register_lock &c->lock irq_context: 0 &dev->mutex register_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex register_lock &zone->lock irq_context: 0 &dev->mutex register_lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &meta->lock irq_context: 
0 (wq_completion)events (work_completion)(&p->wq) kfence_freelist_lock irq_context: 0 &dev->mutex cpu_hotplug_lock irq_context: 0 &dev->mutex &s->s_inode_list_lock irq_context: 0 &dev->mutex (oom_notify_list).rwsem irq_context: 0 &dev->mutex &dev->config_lock irq_context: 0 vdpa_dev_lock irq_context: 0 subsys mutex#33 irq_context: 0 subsys mutex#33 &k->k_lock irq_context: 0 pcpu_alloc_mutex &rq->__lock irq_context: 0 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &n->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &anon_vma->rwsem &rq->__lock irq_context: hardirq allocation_wait.lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock fs_reclaim &rq->__lock irq_context: 0 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &mm->mmap_lock &rq->__lock irq_context: 0 rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &zone->lock irq_context: 0 rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &obj_hash[i].lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 quarantine_lock irq_context: 0 &root->kernfs_rwsem &rq->__lock irq_context: 0 &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sig->cred_guard_mutex fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock 
fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock fill_pool_map-wait-type-override &zone->lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &tsk->futex_exit_mutex &rq->__lock irq_context: 0 &sig->cred_guard_mutex fs_reclaim &rq->__lock irq_context: 0 &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_node_0 irq_context: 0 &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rq->__lock irq_context: 0 &sig->cred_guard_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &child->perf_event_mutex &rq->__lock irq_context: 0 pcpu_alloc_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pcpu_alloc_mutex.wait_lock irq_context: 0 pcpu_alloc_mutex &cfs_rq->removed.lock irq_context: 0 pcpu_alloc_mutex &obj_hash[i].lock irq_context: 0 pcpu_alloc_mutex pool_lock#2 irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 &obj_hash[i].lock pool_lock irq_context: 0 &sig->cred_guard_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock batched_entropy_u8.lock irq_context: 0 &mm->mmap_lock kfence_freelist_lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &meta->lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock kfence_freelist_lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &cfs_rq->removed.lock irq_context: 0 &root->kernfs_rwsem &cfs_rq->removed.lock irq_context: 0 &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock lock#4 &lruvec->lru_lock irq_context: 0 &mm->mmap_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &dev->mutex pnp_lock irq_context: 0 &dev->mutex serial_mutex irq_context: 0 &dev->mutex serial_mutex gpio_lookup_lock irq_context: 0 &dev->mutex serial_mutex port_mutex irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex fs_reclaim irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex fs_reclaim mmu_notifier_invalidate_range_start 
irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex pool_lock#2 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex console_mutex irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex resource_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &port_lock_key irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex (console_sem).lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex console_lock console_srcu console_owner_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex console_lock console_srcu console_owner irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex ctrl_ida.xa_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &x->wait#9 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &obj_hash[i].lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &dev->power.lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &k->list_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex bus_type_sem irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &k->k_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex dpm_list_mtx irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &c->lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &____s->seqcount irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex uevent_sock_mutex irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &zone->lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &dev->mutex &dev->power.lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &dev->mutex &k->list_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &dev->mutex &k->k_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex subsys mutex#34 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex semaphore->lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex *(&acpi_gbl_reference_count_lock) irq_context: 0 &dev->mutex 
serial_mutex port_mutex &port->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex dev_pm_qos_sysfs_mtx irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex kernfs_idr_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &k->k_lock klist_remove_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex deferred_probe_mutex irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex device_links_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex kernfs_idr_lock &obj_hash[i].lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex &k->list_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex fs_reclaim irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex pool_lock#2 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex req_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &p->pi_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &rq->__lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &cfs_rq->removed.lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &x->wait#11 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex subsys mutex#21 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex subsys mutex#21 &k->k_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex chrdevs_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex 
serial_mutex port_mutex &port->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex &c->lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex &____s->seqcount irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex batched_entropy_u8.lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex kfence_freelist_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &meta->lock irq_context: 0 &dev->mutex rng_index_ida.xa_lock irq_context: hardirq &x->wait#12 irq_context: 0 &dev->mutex rng_mutex irq_context: 0 &dev->mutex rng_mutex &x->wait#13 irq_context: 0 &dev->mutex rng_mutex fs_reclaim irq_context: 0 &dev->mutex rng_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex rng_mutex pool_lock#2 irq_context: 0 &dev->mutex rng_mutex kthread_create_lock irq_context: 0 &dev->mutex rng_mutex &p->pi_lock irq_context: 0 &dev->mutex rng_mutex &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex rng_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex rng_mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex rng_mutex &rq->__lock irq_context: 0 &dev->mutex rng_mutex &x->wait irq_context: 0 &dev->mutex rng_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex rng_mutex &obj_hash[i].lock irq_context: 0 rng_mutex irq_context: 0 reading_mutex irq_context: 0 &dev->mutex reading_mutex irq_context: 0 &dev->mutex reading_mutex reading_mutex.wait_lock irq_context: 0 &dev->mutex reading_mutex &rq->__lock irq_context: 0 reading_mutex.wait_lock irq_context: 0 &dev->mutex reading_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex input_pool.lock irq_context: 0 &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 &root->kernfs_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 &root->kernfs_rwsem pool_lock#2 irq_context: 0 klist_remove_lock irq_context: 0 &k->k_lock klist_remove_lock irq_context: 0 kernfs_idr_lock irq_context: 0 &dev->devres_lock irq_context: 0 &dev->managed.lock irq_context: 0 &type->s_umount_key#24/1 irq_context: 0 &type->s_umount_key#24/1 fs_reclaim irq_context: 0 &type->s_umount_key#24/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#24/1 &c->lock irq_context: 0 &type->s_umount_key#24/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#24/1 &zone->lock irq_context: 0 &type->s_umount_key#24/1 &____s->seqcount irq_context: 0 &type->s_umount_key#24/1 pool_lock#2 irq_context: 0 &type->s_umount_key#24/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#24/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#24/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#24/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#24/1 sb_lock irq_context: 0 &type->s_umount_key#24/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#24/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#24/1 &sb->s_type->i_lock_key#21 irq_context: 0 &type->s_umount_key#24/1 &s->s_inode_list_lock irq_context: 0 
&type->s_umount_key#24/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#24/1 rcu_read_lock pool_lock#2 irq_context: 0 &type->s_umount_key#24/1 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#24/1 &sb->s_type->i_lock_key#21 &dentry->d_lock irq_context: 0 &type->s_umount_key#24/1 &dentry->d_lock irq_context: 0 &sb->s_type->i_lock_key#21 irq_context: 0 lock drm_minor_lock irq_context: 0 lock drm_minor_lock pool_lock#2 irq_context: 0 stack_depot_init_mutex irq_context: 0 &dev->debugfs_mutex irq_context: 0 subsys mutex#35 irq_context: 0 subsys mutex#35 &k->k_lock irq_context: 0 drm_minor_lock irq_context: 0 cpuset_rwsem &p->pi_lock &rq->__lock &rt_b->rt_runtime_lock irq_context: 0 cpuset_rwsem &p->pi_lock &rq->__lock &rt_b->rt_runtime_lock tk_core.seq.seqcount irq_context: 0 cpuset_rwsem &p->pi_lock &rq->__lock &rt_b->rt_runtime_lock hrtimer_bases.lock irq_context: 0 cpuset_rwsem &p->pi_lock &rq->__lock &rt_b->rt_runtime_lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 cpuset_rwsem &p->pi_lock &rq->__lock &rt_b->rt_runtime_lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: hardirq &rt_b->rt_runtime_lock irq_context: hardirq &rt_b->rt_runtime_lock tk_core.seq.seqcount irq_context: hardirq &rt_rq->rt_runtime_lock irq_context: 0 (worker)->lock irq_context: 0 &dev->mode_config.idr_mutex irq_context: 0 &dev->mode_config.idr_mutex fs_reclaim irq_context: 0 &dev->mode_config.idr_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mode_config.idr_mutex pool_lock#2 irq_context: 0 crtc_ww_class_acquire irq_context: 0 crtc_ww_class_acquire crtc_ww_class_mutex irq_context: 0 crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_acquire irq_context: 0 crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_acquire reservation_ww_class_mutex irq_context: 0 &dev->mode_config.blob_lock irq_context: 0 &xa->xa_lock#5 irq_context: 0 &xa->xa_lock#6 irq_context: 0 &dev->mode_config.connector_list_lock irq_context: 0 &dev->vbl_lock irq_context: 0 drm_connector_list_iter &dev->mode_config.connector_list_lock irq_context: 0 drm_connector_list_iter fs_reclaim irq_context: 0 drm_connector_list_iter fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 drm_connector_list_iter pool_lock#2 irq_context: 0 drm_connector_list_iter &connector->mutex irq_context: 0 drm_connector_list_iter &connector->mutex fs_reclaim irq_context: 0 drm_connector_list_iter &connector->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 drm_connector_list_iter &connector->mutex &c->lock irq_context: 0 drm_connector_list_iter &connector->mutex &pcp->lock &zone->lock irq_context: 0 drm_connector_list_iter &connector->mutex &zone->lock irq_context: 0 drm_connector_list_iter &connector->mutex &____s->seqcount irq_context: 0 drm_connector_list_iter &connector->mutex pool_lock#2 irq_context: 0 drm_connector_list_iter &connector->mutex &x->wait#9 irq_context: 0 drm_connector_list_iter &connector->mutex &obj_hash[i].lock irq_context: 0 drm_connector_list_iter &connector->mutex &k->list_lock irq_context: 0 drm_connector_list_iter &connector->mutex lock irq_context: 0 drm_connector_list_iter &connector->mutex lock kernfs_idr_lock irq_context: 0 drm_connector_list_iter &connector->mutex &root->kernfs_rwsem irq_context: 0 drm_connector_list_iter &connector->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 drm_connector_list_iter &connector->mutex bus_type_sem irq_context: 0 drm_connector_list_iter &connector->mutex sysfs_symlink_target_lock 
irq_context: 0 drm_connector_list_iter &connector->mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 drm_connector_list_iter &connector->mutex &root->kernfs_rwsem irq_context: 0 drm_connector_list_iter &connector->mutex &dev->power.lock irq_context: 0 drm_connector_list_iter &connector->mutex dpm_list_mtx irq_context: 0 drm_connector_list_iter &connector->mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 drm_connector_list_iter &connector->mutex uevent_sock_mutex irq_context: 0 drm_connector_list_iter &connector->mutex rcu_read_lock &pool->lock/1 irq_context: 0 drm_connector_list_iter &connector->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 drm_connector_list_iter &connector->mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 drm_connector_list_iter &connector->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 drm_connector_list_iter &connector->mutex running_helpers_waitq.lock irq_context: 0 drm_connector_list_iter &connector->mutex &k->k_lock irq_context: 0 drm_connector_list_iter &connector->mutex subsys mutex#35 irq_context: 0 drm_connector_list_iter &connector->mutex subsys mutex#35 &k->k_lock irq_context: 0 drm_connector_list_iter &connector->mutex pin_fs_lock irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 &zone->lock irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 drm_connector_list_iter &connector->mutex &dev->mode_config.idr_mutex irq_context: 0 drm_connector_list_iter &connector->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 drm_connector_list_iter &connector->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 drm_connector_list_iter &connector->mutex connector_list_lock irq_context: softirq &(&krcp->monitor_work)->timer irq_context: softirq 
&(&krcp->monitor_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&krcp->monitor_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&krcp->monitor_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&krcp->monitor_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&krcp->monitor_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&tbl->managed_work)->timer irq_context: softirq &(&tbl->managed_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&tbl->managed_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&tbl->managed_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) krc.lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback pool_lock#2 irq_context: 0 &dev->filelist_mutex irq_context: 0 &helper->lock irq_context: 0 &helper->lock drm_connector_list_iter &dev->mode_config.connector_list_lock irq_context: 0 &helper->lock drm_connector_list_iter fs_reclaim irq_context: 0 &helper->lock drm_connector_list_iter fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &helper->lock drm_connector_list_iter pool_lock#2 irq_context: 0 &helper->lock fs_reclaim irq_context: 0 &helper->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &helper->lock pool_lock#2 irq_context: 0 &helper->lock &client->modeset_mutex irq_context: 0 &helper->lock &client->modeset_mutex &dev->mode_config.mutex irq_context: 0 &helper->lock &client->modeset_mutex &dev->mode_config.mutex crtc_ww_class_acquire irq_context: 0 &helper->lock &client->modeset_mutex &dev->mode_config.mutex crtc_ww_class_acquire crtc_ww_class_mutex irq_context: 0 &helper->lock &client->modeset_mutex &dev->mode_config.mutex crtc_ww_class_acquire crtc_ww_class_mutex fs_reclaim irq_context: 0 &helper->lock &client->modeset_mutex &dev->mode_config.mutex crtc_ww_class_acquire crtc_ww_class_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &helper->lock &client->modeset_mutex &dev->mode_config.mutex crtc_ww_class_acquire crtc_ww_class_mutex pool_lock#2 irq_context: 0 &helper->lock &client->modeset_mutex &dev->mode_config.mutex crtc_ww_class_acquire crtc_ww_class_mutex &c->lock irq_context: 0 &helper->lock &client->modeset_mutex &dev->mode_config.mutex crtc_ww_class_acquire crtc_ww_class_mutex &____s->seqcount irq_context: 0 &helper->lock &client->modeset_mutex &dev->mode_config.mutex fs_reclaim irq_context: 0 &helper->lock &client->modeset_mutex &dev->mode_config.mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &helper->lock &client->modeset_mutex &dev->mode_config.mutex pool_lock#2 irq_context: 0 &helper->lock &client->modeset_mutex &dev->mode_config.mutex &obj_hash[i].lock irq_context: 0 &helper->lock &client->modeset_mutex fs_reclaim irq_context: 0 &helper->lock &client->modeset_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &helper->lock &client->modeset_mutex pool_lock#2 irq_context: 0 &helper->lock &obj_hash[i].lock irq_context: 0 
&helper->lock &client->modeset_mutex drm_connector_list_iter &dev->mode_config.connector_list_lock irq_context: 0 &helper->lock mmu_notifier_invalidate_range_start irq_context: 0 &helper->lock &sb->s_type->i_lock_key irq_context: 0 &helper->lock &s->s_inode_list_lock irq_context: 0 &helper->lock tk_core.seq.seqcount irq_context: 0 &helper->lock batched_entropy_u32.lock irq_context: 0 &helper->lock &sb->s_type->i_lock_key &dentry->d_lock irq_context: 0 &helper->lock &mgr->vm_lock irq_context: 0 &helper->lock &mgr->vm_lock pool_lock#2 irq_context: 0 &helper->lock &dev->object_name_lock irq_context: 0 &helper->lock &dev->object_name_lock lock irq_context: 0 &helper->lock &dev->object_name_lock lock &file_private->table_lock irq_context: 0 &helper->lock &dev->object_name_lock lock &file_private->table_lock pool_lock#2 irq_context: 0 &helper->lock &node->vm_lock irq_context: 0 &helper->lock &file_private->table_lock irq_context: 0 &helper->lock &dev->mode_config.idr_mutex irq_context: 0 &helper->lock &dev->mode_config.fb_lock irq_context: 0 &helper->lock &file->fbs_lock irq_context: 0 &helper->lock &prime_fpriv->lock irq_context: 0 &helper->lock &node->vm_lock &obj_hash[i].lock irq_context: 0 &helper->lock &node->vm_lock pool_lock#2 irq_context: 0 &helper->lock &file_private->table_lock &obj_hash[i].lock irq_context: 0 &helper->lock &file_private->table_lock pool_lock#2 irq_context: 0 &helper->lock free_vmap_area_lock irq_context: 0 &helper->lock vmap_area_lock irq_context: 0 &helper->lock &pcp->lock &zone->lock irq_context: 0 &helper->lock &zone->lock irq_context: 0 &helper->lock &____s->seqcount irq_context: 0 &helper->lock init_mm.page_table_lock irq_context: 0 &helper->lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &helper->lock &rq->__lock irq_context: 0 &helper->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &helper->lock &cfs_rq->removed.lock irq_context: 0 &helper->lock &c->lock irq_context: 0 registration_lock irq_context: 0 registration_lock fs_reclaim irq_context: 0 registration_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 registration_lock pool_lock#2 irq_context: 0 registration_lock &x->wait#9 irq_context: 0 registration_lock &obj_hash[i].lock irq_context: 0 registration_lock &k->list_lock irq_context: 0 registration_lock gdp_mutex irq_context: 0 registration_lock gdp_mutex &k->list_lock irq_context: 0 registration_lock gdp_mutex fs_reclaim irq_context: 0 registration_lock gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 registration_lock gdp_mutex pool_lock#2 irq_context: 0 registration_lock gdp_mutex lock irq_context: 0 registration_lock gdp_mutex lock kernfs_idr_lock irq_context: 0 registration_lock gdp_mutex &root->kernfs_rwsem irq_context: 0 registration_lock gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 registration_lock lock irq_context: 0 registration_lock lock kernfs_idr_lock irq_context: 0 registration_lock &root->kernfs_rwsem irq_context: 0 registration_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 registration_lock bus_type_sem irq_context: 0 registration_lock sysfs_symlink_target_lock irq_context: 0 registration_lock &root->kernfs_rwsem irq_context: 0 registration_lock &c->lock irq_context: 0 registration_lock &pcp->lock &zone->lock irq_context: 0 registration_lock &zone->lock irq_context: 0 registration_lock &____s->seqcount irq_context: 0 registration_lock &dev->power.lock irq_context: 0 registration_lock dpm_list_mtx irq_context: 0 
registration_lock req_lock irq_context: 0 registration_lock &p->pi_lock irq_context: 0 registration_lock &p->pi_lock &rq->__lock irq_context: 0 registration_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 registration_lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 registration_lock &rq->__lock irq_context: 0 registration_lock &x->wait#11 irq_context: 0 registration_lock uevent_sock_mutex irq_context: 0 registration_lock rcu_read_lock &pool->lock/1 irq_context: 0 registration_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 registration_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 registration_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 registration_lock running_helpers_waitq.lock irq_context: 0 registration_lock &k->k_lock irq_context: 0 registration_lock subsys mutex#11 irq_context: 0 registration_lock subsys mutex#11 &k->k_lock irq_context: 0 registration_lock vt_switch_mutex irq_context: 0 registration_lock vt_switch_mutex fs_reclaim irq_context: 0 registration_lock vt_switch_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 registration_lock vt_switch_mutex pool_lock#2 irq_context: 0 registration_lock (console_sem).lock irq_context: 0 registration_lock console_lock irq_context: 0 registration_lock console_lock &fb_info->lock irq_context: 0 registration_lock console_lock fs_reclaim irq_context: 0 registration_lock console_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 registration_lock console_lock pool_lock#2 irq_context: 0 registration_lock console_lock &obj_hash[i].lock irq_context: 0 registration_lock console_lock &pcp->lock &zone->lock irq_context: 0 registration_lock console_lock &zone->lock irq_context: 0 registration_lock console_lock &____s->seqcount irq_context: 0 registration_lock console_lock rcu_read_lock pool_lock#2 irq_context: 0 registration_lock console_lock &base->lock irq_context: 0 registration_lock console_lock &base->lock &obj_hash[i].lock irq_context: 0 registration_lock console_lock &x->wait#9 irq_context: 0 registration_lock console_lock &k->list_lock irq_context: 0 registration_lock console_lock gdp_mutex irq_context: 0 registration_lock console_lock gdp_mutex &k->list_lock irq_context: 0 registration_lock console_lock lock irq_context: 0 registration_lock console_lock lock kernfs_idr_lock irq_context: 0 registration_lock console_lock &root->kernfs_rwsem irq_context: 0 registration_lock console_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 registration_lock console_lock bus_type_sem irq_context: 0 registration_lock console_lock &c->lock irq_context: 0 registration_lock console_lock sysfs_symlink_target_lock irq_context: 0 registration_lock console_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 registration_lock console_lock &root->kernfs_rwsem irq_context: 0 registration_lock console_lock &dev->power.lock irq_context: 0 registration_lock console_lock dpm_list_mtx irq_context: 0 registration_lock console_lock uevent_sock_mutex irq_context: 0 registration_lock console_lock rcu_read_lock &pool->lock/1 irq_context: 0 registration_lock console_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 registration_lock console_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 registration_lock console_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 registration_lock console_lock running_helpers_waitq.lock irq_context: 0 registration_lock console_lock subsys mutex#5 
irq_context: 0 registration_lock console_lock subsys mutex#5 &k->k_lock irq_context: 0 registration_lock console_lock vga_lock irq_context: 0 registration_lock console_lock &helper->lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire fs_reclaim irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire pool_lock#2 irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex fs_reclaim irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex pool_lock#2 irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &obj_hash[i].lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &obj_hash[i].lock pool_lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &dev->mode_config.idr_mutex irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &dev->mode_config.blob_lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex drm_connector_list_iter &dev->mode_config.connector_list_lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &c->lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &pcp->lock &zone->lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &zone->lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &____s->seqcount irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &crtc->commit_lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex 
crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock fs_reclaim irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock pool_lock#2 irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &pcp->lock &zone->lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &zone->lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &____s->seqcount irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &xa->xa_lock#7 irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock lock#4 irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &info->lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &xa->xa_lock#7 pool_lock#2 irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock lock#4 &lruvec->lru_lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &rq->__lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire 
crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock fs_reclaim irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock pool_lock#2 irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock free_vmap_area_lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock vmap_area_lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &____s->seqcount irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock init_mm.page_table_lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &pcp->lock &zone->lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &zone->lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex tk_core.seq.seqcount irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &dev->vbl_lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &____s->seqcount#6 irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &x->wait#14 irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &obj_hash[i].lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock pool_lock#2 irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex 
crtc_ww_class_acquire crtc_ww_class_mutex &dev->vbl_lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &dev->vbl_lock &dev->vblank_time_lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &dev->vbl_lock &dev->vblank_time_lock tk_core.seq.seqcount irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &dev->vbl_lock &dev->vblank_time_lock &(&vblank->seqlock)->lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &dev->vbl_lock &dev->vblank_time_lock &(&vblank->seqlock)->lock &____s->seqcount#6 irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &x->wait#14 irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex (work_completion)(&vkms_state->composer_work) irq_context: 0 registration_lock console_lock &helper->damage_lock irq_context: 0 registration_lock console_lock rcu_read_lock &pool->lock irq_context: 0 registration_lock console_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 registration_lock console_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 registration_lock console_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 registration_lock console_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 registration_lock console_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 registration_lock console_lock &helper->lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&helper->damage_work) irq_context: 0 (wq_completion)events (work_completion)(&helper->damage_work) &helper->damage_lock irq_context: 0 (wq_completion)events (work_completion)(&helper->damage_work) &helper->lock irq_context: 0 (wq_completion)events (work_completion)(&helper->damage_work) &helper->lock &lock->wait_lock irq_context: 0 (wq_completion)events (work_completion)(&helper->damage_work) &helper->lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&helper->damage_work) &helper->lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&helper->damage_work) &helper->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex rcu_read_lock pool_lock#2 irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex drm_connector_list_iter fs_reclaim irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex drm_connector_list_iter fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex drm_connector_list_iter pool_lock#2 irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock 
&dev->event_lock &dev->vbl_lock &dev->vblank_time_lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &dev->vbl_lock &dev->vblank_time_lock &obj_hash[i].lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &dev->vbl_lock &dev->vblank_time_lock hrtimer_bases.lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &dev->vbl_lock &dev->vblank_time_lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &dev->vbl_lock &dev->vblank_time_lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &dev->vbl_lock &dev->vblank_time_lock tk_core.seq.seqcount irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &dev->vblank_time_lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &base->lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &base->lock &obj_hash[i].lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &rq->__lock irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: hardirq &vkms_out->lock irq_context: hardirq &vkms_out->lock &dev->event_lock irq_context: hardirq &vkms_out->lock &dev->event_lock &dev->vblank_time_lock irq_context: hardirq &vkms_out->lock &dev->event_lock &dev->vblank_time_lock &(&vblank->seqlock)->lock irq_context: hardirq &vkms_out->lock &dev->event_lock &dev->vblank_time_lock &(&vblank->seqlock)->lock &____s->seqcount#6 irq_context: hardirq &vkms_out->lock &dev->event_lock &vblank->queue irq_context: hardirq &vkms_out->lock &dev->event_lock &____s->seqcount#6 irq_context: hardirq &vkms_out->lock &dev->event_lock &obj_hash[i].lock irq_context: hardirq &vkms_out->lock &dev->event_lock &base->lock irq_context: hardirq &vkms_out->lock &dev->event_lock &base->lock &obj_hash[i].lock irq_context: hardirq &vkms_out->lock &dev->event_lock &x->wait#14 irq_context: hardirq &vkms_out->lock &dev->event_lock &x->wait#14 &p->pi_lock irq_context: hardirq &vkms_out->lock &dev->event_lock &x->wait#14 &p->pi_lock &rq->__lock irq_context: hardirq &vkms_out->lock &dev->event_lock &x->wait#14 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: hardirq &vkms_out->lock &dev->event_lock pool_lock#2 irq_context: 0 registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex (&timer.timer) irq_context: 0 registration_lock console_lock &helper->lock 
&dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex (work_completion)(&vkms_state->composer_work)#2 irq_context: 0 registration_lock console_lock &lock->wait_lock irq_context: 0 registration_lock console_lock &p->pi_lock irq_context: 0 registration_lock console_lock &p->pi_lock &rq->__lock irq_context: 0 registration_lock console_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 registration_lock console_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&helper->damage_work) &helper->lock reservation_ww_class_mutex irq_context: 0 (wq_completion)events (work_completion)(&helper->damage_work) &helper->lock reservation_ww_class_mutex &shmem->vmap_lock irq_context: 0 registration_lock console_lock batched_entropy_u8.lock irq_context: 0 registration_lock console_lock kfence_freelist_lock irq_context: 0 registration_lock console_lock vt_event_lock irq_context: 0 registration_lock console_lock &meta->lock irq_context: 0 registration_lock console_lock (console_sem).lock irq_context: 0 registration_lock console_lock console_owner_lock irq_context: 0 registration_lock console_lock console_srcu console_owner_lock irq_context: 0 registration_lock console_lock console_srcu console_owner irq_context: 0 registration_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 registration_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 kernel_fb_helper_lock irq_context: 0 &dev->clientlist_mutex irq_context: 0 &dev->queue_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex lock kernfs_idr_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &root->kernfs_rwsem irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &c->lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &pcp->lock &zone->lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &zone->lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &____s->seqcount irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &root->kernfs_rwsem &cfs_rq->removed.lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 cpu_hotplug_lock cpuhp_state-up &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) cpu_hotplug_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) cpu_hotplug_lock wq_pool_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) cpu_hotplug_lock wq_pool_mutex fs_reclaim irq_context: 0 blk_queue_ida.xa_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) cpu_hotplug_lock wq_pool_mutex pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) cpu_hotplug_lock wq_pool_mutex &wq->mutex irq_context: 0 
(wq_completion)events_unbound (work_completion)(&entry->work) cpu_hotplug_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) wq_pool_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) wq_pool_mutex &wq->mutex irq_context: 0 &sb->s_type->i_lock_key#3 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) pcpu_alloc_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) pcpu_alloc_mutex pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) batched_entropy_u32.lock irq_context: 0 &xa->xa_lock#8 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &n->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) mmu_notifier_invalidate_range_start irq_context: 0 lock &q->queue_lock irq_context: 0 lock &q->queue_lock &blkcg->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) blk_queue_ida.xa_lock irq_context: 0 &q->queue_lock irq_context: 0 &q->queue_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock irq_context: 0 &q->queue_lock pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock &q->unused_hctx_lock irq_context: 0 &q->queue_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock mmu_notifier_invalidate_range_start irq_context: 0 &q->queue_lock percpu_counters_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock &obj_hash[i].lock irq_context: 0 &q->queue_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock cpu_hotplug_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &bdev->bd_size_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock &xa->xa_lock#9 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &set->tag_list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) bio_slab_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) percpu_counters_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &sb->s_type->i_lock_key#3 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &s->s_inode_list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &xa->xa_lock#8 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) lock &q->queue_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) lock &q->queue_lock &blkcg->lock irq_context: 0 
(wq_completion)events_unbound (work_completion)(&entry->work) lock &q->queue_lock &blkcg->lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->mq_freeze_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->mq_freeze_lock percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->mq_freeze_lock percpu_ref_switch_lock rcu_read_lock &q->mq_freeze_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->mq_freeze_lock &q->mq_freeze_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock percpu_counters_lock irq_context: 0 subsys mutex#36 irq_context: 0 subsys mutex#36 &k->k_lock irq_context: 0 dev_hotplug_mutex irq_context: 0 dev_hotplug_mutex &dev->power.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &x->wait#9 irq_context: 0 &q->sysfs_dir_lock irq_context: 0 &q->sysfs_dir_lock fs_reclaim irq_context: 0 &q->sysfs_dir_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &q->sysfs_dir_lock pool_lock#2 irq_context: 0 &q->sysfs_dir_lock lock irq_context: 0 &q->sysfs_dir_lock lock kernfs_idr_lock irq_context: 0 &q->sysfs_dir_lock &root->kernfs_rwsem irq_context: 0 &q->sysfs_dir_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &q->sysfs_dir_lock &c->lock irq_context: 0 &q->sysfs_dir_lock &pcp->lock &zone->lock irq_context: 0 &q->sysfs_dir_lock &zone->lock irq_context: 0 &q->sysfs_dir_lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) batched_entropy_u32.lock crngs.lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex pin_fs_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 
&q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 percpu_ref_switch_lock irq_context: 0 subsys mutex#37 irq_context: 0 subsys mutex#37 &k->k_lock irq_context: 0 cgwb_lock irq_context: 0 bdi_lock irq_context: 0 inode_hash_lock irq_context: 0 inode_hash_lock &sb->s_type->i_lock_key#3 irq_context: 0 bdev_lock irq_context: 0 &disk->open_mutex irq_context: 0 &disk->open_mutex fs_reclaim irq_context: 0 &disk->open_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &disk->open_mutex pool_lock#2 irq_context: 0 &disk->open_mutex free_vmap_area_lock irq_context: 0 &disk->open_mutex vmap_area_lock irq_context: 0 &disk->open_mutex &____s->seqcount irq_context: 0 &disk->open_mutex init_mm.page_table_lock irq_context: 0 &disk->open_mutex &pcp->lock &zone->lock irq_context: 0 &disk->open_mutex &zone->lock irq_context: 0 &disk->open_mutex &xa->xa_lock#7 irq_context: 0 &disk->open_mutex lock#4 irq_context: 0 &disk->open_mutex mmu_notifier_invalidate_range_start irq_context: 0 &disk->open_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &disk->open_mutex &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock &____s->seqcount irq_context: 0 &disk->open_mutex &mapping->private_lock irq_context: 0 &disk->open_mutex tk_core.seq.seqcount irq_context: 0 &disk->open_mutex &ret->b_uptodate_lock irq_context: 0 &disk->open_mutex &obj_hash[i].lock irq_context: 0 &disk->open_mutex &xa->xa_lock#7 pool_lock#2 irq_context: 0 &disk->open_mutex rcu_read_lock pool_lock#2 irq_context: 0 &disk->open_mutex purge_vmap_area_lock irq_context: 0 &disk->open_mutex &sb->s_type->i_lock_key#3 irq_context: 0 &disk->open_mutex &sb->s_type->i_lock_key#3 &xa->xa_lock#7 irq_context: 0 &disk->open_mutex &sb->s_type->i_lock_key#3 &xa->xa_lock#7 &obj_hash[i].lock irq_context: 0 &disk->open_mutex &sb->s_type->i_lock_key#3 &xa->xa_lock#7 pool_lock#2 irq_context: 0 &disk->open_mutex lock#4 &lruvec->lru_lock irq_context: 0 &disk->open_mutex lock#5 irq_context: 0 &disk->open_mutex &lruvec->lru_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &q->sysfs_dir_lock lock kernfs_idr_lock 
pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) major_names_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) major_names_lock fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) major_names_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) major_names_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) major_names_lock major_names_spinlock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) floppy_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) floppy_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) floppy_lock &base->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) floppy_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rtc_lock irq_context: 0 misc_mtx lock kernfs_idr_lock &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &wq->mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &wq->mutex &pool->lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &wq->mutex &x->wait#10 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex &irq_desc_lock_class irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex &irq_desc_lock_class vector_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex &irq_desc_lock_class ioapic_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex &irq_desc_lock_class mask_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock vector_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock ioapic_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex &irq_desc_lock_class ioapic_lock i8259A_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) register_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &irq_desc_lock_class irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) proc_subdir_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) proc_inum_ida.xa_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) proc_subdir_lock irq_context: 0 
(wq_completion)events_unbound (work_completion)(&entry->work) resource_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &base->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &base->lock &obj_hash[i].lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 &disk->open_mutex purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 &disk->open_mutex purge_vmap_area_lock pool_lock#2 irq_context: 0 &q->queue_lock &c->lock irq_context: 0 &q->queue_lock &pcp->lock &zone->lock irq_context: 0 &q->queue_lock &zone->lock irq_context: 0 &q->queue_lock &____s->seqcount irq_context: 0 &disk->open_mutex &xa->xa_lock#7 &c->lock irq_context: 0 &disk->open_mutex &xa->xa_lock#7 &pcp->lock &zone->lock irq_context: 0 &disk->open_mutex &xa->xa_lock#7 &zone->lock irq_context: 0 &disk->open_mutex &xa->xa_lock#7 &____s->seqcount irq_context: 0 &q->queue_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &q->queue_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &disk->open_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &disk->open_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &disk->open_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) (&timer.timer) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) command_done.lock irq_context: 0 misc_mtx lock kernfs_idr_lock &____s->seqcount irq_context: 0 vsock_register_mutex irq_context: softirq rcu_callback rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&p->wq) purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) purge_vmap_area_lock pool_lock#2 irq_context: 0 &q->queue_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &q->queue_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &q->queue_lock fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 &q->queue_lock fill_pool_map-wait-type-override &zone->lock irq_context: 0 loop_ctl_mutex irq_context: 0 loop_ctl_mutex fs_reclaim irq_context: 0 loop_ctl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 loop_ctl_mutex pool_lock#2 irq_context: 0 &q->sysfs_lock irq_context: 0 &q->sysfs_lock &q->unused_hctx_lock irq_context: 0 &q->sysfs_lock mmu_notifier_invalidate_range_start irq_context: 0 &q->sysfs_lock pool_lock#2 irq_context: 0 &q->sysfs_lock &obj_hash[i].lock irq_context: 0 &q->sysfs_lock cpu_hotplug_lock irq_context: 0 &q->sysfs_lock cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 &q->sysfs_lock fs_reclaim irq_context: 0 &q->sysfs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &q->sysfs_lock &xa->xa_lock#9 irq_context: 0 &set->tag_list_lock irq_context: 0 &q->mq_freeze_lock irq_context: 0 &q->mq_freeze_lock percpu_ref_switch_lock irq_context: 0 &q->mq_freeze_lock percpu_ref_switch_lock rcu_read_lock &q->mq_freeze_wq irq_context: 0 &q->mq_freeze_lock &q->mq_freeze_wq irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock fs_reclaim 
irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &c->lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &____s->seqcount irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock pool_lock#2 irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock pcpu_alloc_mutex irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &obj_hash[i].lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->mq_freeze_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->mq_freeze_lock percpu_ref_switch_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->mq_freeze_lock percpu_ref_switch_lock rcu_read_lock &q->mq_freeze_wq irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock percpu_ref_switch_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->queue_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->mq_freeze_lock &q->mq_freeze_wq irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &stats->lock irq_context: 0 &q->sysfs_lock &c->lock irq_context: 0 &q->sysfs_lock &pcp->lock &zone->lock irq_context: 0 &q->sysfs_lock &zone->lock irq_context: 0 &q->sysfs_lock &____s->seqcount irq_context: 0 &q->sysfs_dir_lock batched_entropy_u8.lock irq_context: 0 &q->sysfs_dir_lock kfence_freelist_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &zone->lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &obj_hash[i].lock pool_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &pcp->lock &zone->lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &zone->lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &pcp->lock &zone->lock &____s->seqcount irq_context: hardirq &vb->stop_update_lock irq_context: hardirq &vb->stop_update_lock rcu_read_lock &pool->lock irq_context: hardirq &vb->stop_update_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: hardirq &vb->stop_update_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: hardirq &vb->stop_update_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: hardirq &vb->stop_update_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: hardirq &vb->stop_update_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_freezable irq_context: 0 (wq_completion)events_freezable (work_completion)(&vb->update_balloon_stats_work) irq_context: 0 (wq_completion)events_freezable (work_completion)(&vb->update_balloon_stats_work) cpu_hotplug_lock irq_context: 0 (wq_completion)events_freezable (work_completion)(&vb->update_balloon_stats_work) &s->s_inode_list_lock irq_context: 0 &q->sysfs_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &q->sysfs_lock fill_pool_map-wait-type-override pool_lock irq_context: softirq &(&ops->cursor_work)->timer irq_context: softirq &(&ops->cursor_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&ops->cursor_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&ops->cursor_work)->timer rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq &(&ops->cursor_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&ops->cursor_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&ops->cursor_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient 
(work_completion)(&(&ops->cursor_work)->work) irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&ops->cursor_work)->work) (console_sem).lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&ops->cursor_work)->work) console_lock &helper->damage_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&ops->cursor_work)->work) console_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&ops->cursor_work)->work) console_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&ops->cursor_work)->work) console_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&ops->cursor_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&ops->cursor_work)->work) &base->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&ops->cursor_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &q->sysfs_dir_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &q->sysfs_dir_lock lock kernfs_idr_lock &c->lock irq_context: 0 &q->sysfs_dir_lock lock kernfs_idr_lock &____s->seqcount irq_context: 0 nbd_index_mutex irq_context: 0 nbd_index_mutex fs_reclaim irq_context: 0 nbd_index_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 nbd_index_mutex pool_lock#2 irq_context: 0 set->srcu irq_context: 0 (work_completion)(&(&q->requeue_work)->work) irq_context: 0 (work_completion)(&(&hctx->run_work)->work) irq_context: 0 &q->debugfs_mutex irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock pool_lock#2 irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock lock kernfs_idr_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &root->kernfs_rwsem irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock set->srcu irq_context: 0 &q->queue_lock rcu_read_lock &pool->lock irq_context: 0 &q->queue_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &q->queue_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &q->queue_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &q->queue_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 fill_pool_map-wait-type-override pool_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 &q->sysfs_lock &n->list_lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &q->sysfs_lock &obj_hash[i].lock pool_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 fill_pool_map-wait-type-override &c->lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 
fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 fill_pool_map-wait-type-override &zone->lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &q->sysfs_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex quarantine_lock irq_context: 0 zram_index_mutex irq_context: 0 zram_index_mutex fs_reclaim irq_context: 0 zram_index_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 zram_index_mutex pool_lock#2 irq_context: 0 zram_index_mutex blk_queue_ida.xa_lock irq_context: 0 zram_index_mutex &obj_hash[i].lock irq_context: 0 zram_index_mutex pcpu_alloc_mutex irq_context: 0 zram_index_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 zram_index_mutex bio_slab_lock irq_context: 0 zram_index_mutex &c->lock irq_context: 0 zram_index_mutex &pcp->lock &zone->lock irq_context: 0 zram_index_mutex &zone->lock irq_context: 0 zram_index_mutex &____s->seqcount irq_context: 0 zram_index_mutex percpu_counters_lock irq_context: 0 zram_index_mutex &obj_hash[i].lock pool_lock irq_context: 0 zram_index_mutex mmu_notifier_invalidate_range_start irq_context: 0 zram_index_mutex &sb->s_type->i_lock_key#3 irq_context: 0 zram_index_mutex &s->s_inode_list_lock irq_context: 0 zram_index_mutex &xa->xa_lock#8 irq_context: 0 zram_index_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 zram_index_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 zram_index_mutex lock irq_context: 0 zram_index_mutex lock &q->queue_lock irq_context: 0 zram_index_mutex lock &q->queue_lock &blkcg->lock irq_context: 0 zram_index_mutex &q->queue_lock irq_context: 0 zram_index_mutex &q->queue_lock pool_lock#2 irq_context: 0 zram_index_mutex &q->queue_lock pcpu_lock irq_context: 0 zram_index_mutex &q->queue_lock &obj_hash[i].lock irq_context: 0 zram_index_mutex &q->queue_lock percpu_counters_lock irq_context: 0 zram_index_mutex &x->wait#9 irq_context: 0 zram_index_mutex &bdev->bd_size_lock irq_context: 0 zram_index_mutex &k->list_lock irq_context: 0 zram_index_mutex gdp_mutex irq_context: 0 zram_index_mutex gdp_mutex &k->list_lock irq_context: 0 zram_index_mutex lock kernfs_idr_lock irq_context: 0 zram_index_mutex &root->kernfs_rwsem irq_context: 0 zram_index_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 zram_index_mutex bus_type_sem irq_context: 0 zram_index_mutex sysfs_symlink_target_lock irq_context: 0 zram_index_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 zram_index_mutex &root->kernfs_rwsem irq_context: 0 zram_index_mutex &dev->power.lock irq_context: 0 zram_index_mutex dpm_list_mtx irq_context: 0 zram_index_mutex req_lock irq_context: 0 zram_index_mutex &p->pi_lock irq_context: 0 zram_index_mutex &p->pi_lock &rq->__lock irq_context: 0 zram_index_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 zram_index_mutex &rq->__lock irq_context: 0 zram_index_mutex &x->wait#11 irq_context: 0 zram_index_mutex subsys mutex#36 irq_context: 0 zram_index_mutex subsys mutex#36 &k->k_lock irq_context: 0 zram_index_mutex dev_hotplug_mutex irq_context: 0 zram_index_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 zram_index_mutex &q->sysfs_dir_lock irq_context: 0 zram_index_mutex &q->sysfs_dir_lock fs_reclaim irq_context: 0 zram_index_mutex &q->sysfs_dir_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 
zram_index_mutex &q->sysfs_dir_lock pool_lock#2 irq_context: 0 zram_index_mutex &q->sysfs_dir_lock lock irq_context: 0 zram_index_mutex &q->sysfs_dir_lock lock kernfs_idr_lock irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &root->kernfs_rwsem irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &c->lock irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &pcp->lock &zone->lock irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &zone->lock irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &____s->seqcount irq_context: 0 zram_index_mutex &q->sysfs_dir_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex pin_fs_lock irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 zram_index_mutex percpu_ref_switch_lock irq_context: 0 zram_index_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 zram_index_mutex rcu_read_lock pool_lock#2 irq_context: 0 zram_index_mutex uevent_sock_mutex irq_context: 0 zram_index_mutex rcu_read_lock &pool->lock/1 irq_context: 0 zram_index_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 zram_index_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 zram_index_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 zram_index_mutex running_helpers_waitq.lock irq_context: 0 zram_index_mutex subsys mutex#37 irq_context: 0 zram_index_mutex subsys mutex#37 &k->k_lock irq_context: 0 zram_index_mutex cgwb_lock irq_context: 0 zram_index_mutex pin_fs_lock irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 zram_index_mutex 
&sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 zram_index_mutex bdi_lock irq_context: 0 zram_index_mutex inode_hash_lock irq_context: 0 zram_index_mutex inode_hash_lock &sb->s_type->i_lock_key#3 irq_context: 0 zram_index_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 zram_index_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 zram_index_mutex (console_sem).lock irq_context: 0 zram_index_mutex console_lock console_srcu console_owner_lock irq_context: 0 zram_index_mutex console_lock console_srcu console_owner irq_context: 0 zram_index_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 zram_index_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 subsys mutex#38 irq_context: 0 subsys mutex#38 &k->k_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]#2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]#2 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]#2 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]#2 configfs_dirent_lock irq_context: 0 &q->sysfs_lock &xa->xa_lock#9 pool_lock#2 irq_context: 0 &lock irq_context: 0 &lock nullb_indexes.xa_lock irq_context: 0 &q->sysfs_dir_lock &obj_hash[i].lock irq_context: 0 &disk->open_mutex rcu_read_lock tk_core.seq.seqcount irq_context: 0 &disk->open_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 &disk->open_mutex rcu_read_lock &base->lock irq_context: 0 &disk->open_mutex rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 &disk->open_mutex rcu_read_lock &ret->b_uptodate_lock irq_context: 0 ctx_list.lock irq_context: 0 nfc_index_ida.xa_lock irq_context: 0 nfc_devlist_mutex irq_context: 0 nfc_devlist_mutex fs_reclaim irq_context: 0 nfc_devlist_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 nfc_devlist_mutex pool_lock#2 irq_context: 0 nfc_devlist_mutex &k->list_lock irq_context: 0 nfc_devlist_mutex gdp_mutex irq_context: 0 nfc_devlist_mutex gdp_mutex &k->list_lock irq_context: 0 nfc_devlist_mutex gdp_mutex fs_reclaim irq_context: 0 nfc_devlist_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 nfc_devlist_mutex gdp_mutex pool_lock#2 irq_context: 0 nfc_devlist_mutex gdp_mutex lock irq_context: 0 nfc_devlist_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 nfc_devlist_mutex 
gdp_mutex &root->kernfs_rwsem irq_context: 0 nfc_devlist_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 nfc_devlist_mutex lock irq_context: 0 nfc_devlist_mutex lock kernfs_idr_lock irq_context: 0 nfc_devlist_mutex &root->kernfs_rwsem irq_context: 0 nfc_devlist_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 nfc_devlist_mutex bus_type_sem irq_context: 0 nfc_devlist_mutex sysfs_symlink_target_lock irq_context: 0 nfc_devlist_mutex &root->kernfs_rwsem irq_context: 0 nfc_devlist_mutex &dev->power.lock irq_context: 0 nfc_devlist_mutex dpm_list_mtx irq_context: 0 nfc_devlist_mutex uevent_sock_mutex irq_context: 0 nfc_devlist_mutex &obj_hash[i].lock irq_context: 0 nfc_devlist_mutex rcu_read_lock &pool->lock/1 irq_context: 0 nfc_devlist_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 nfc_devlist_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 nfc_devlist_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 nfc_devlist_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 nfc_devlist_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nfc_devlist_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 nfc_devlist_mutex running_helpers_waitq.lock irq_context: 0 nfc_devlist_mutex subsys mutex#39 irq_context: 0 nfc_devlist_mutex subsys mutex#39 &k->k_lock irq_context: 0 &dev->mutex rfkill_global_mutex irq_context: 0 &dev->mutex rfkill_global_mutex fs_reclaim irq_context: 0 &dev->mutex rfkill_global_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex rfkill_global_mutex &____s->seqcount irq_context: 0 &dev->mutex rfkill_global_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex rfkill_global_mutex &zone->lock irq_context: 0 &dev->mutex rfkill_global_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex rfkill_global_mutex pool_lock#2 irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex rfkill_global_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex rfkill_global_mutex &k->list_lock irq_context: 0 &dev->mutex rfkill_global_mutex lock irq_context: 0 &dev->mutex rfkill_global_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex rfkill_global_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex rfkill_global_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex rfkill_global_mutex bus_type_sem irq_context: 0 &dev->mutex rfkill_global_mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex rfkill_global_mutex &c->lock irq_context: 0 &dev->mutex rfkill_global_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex rfkill_global_mutex &dev->power.lock irq_context: 0 &dev->mutex rfkill_global_mutex dpm_list_mtx irq_context: 0 &dev->mutex rfkill_global_mutex &rfkill->lock irq_context: 0 &dev->mutex rfkill_global_mutex uevent_sock_mutex irq_context: 0 &dev->mutex rfkill_global_mutex &n->list_lock irq_context: 0 &dev->mutex rfkill_global_mutex &n->list_lock &c->lock irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock/1 &p->pi_lock 
&rq->__lock irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex rfkill_global_mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex rfkill_global_mutex &k->k_lock irq_context: 0 &dev->mutex rfkill_global_mutex subsys mutex#40 irq_context: 0 &dev->mutex rfkill_global_mutex subsys mutex#40 &k->k_lock irq_context: 0 &dev->mutex rfkill_global_mutex triggers_list_lock irq_context: 0 &dev->mutex rfkill_global_mutex leds_list_lock irq_context: 0 &dev->mutex rfkill_global_mutex &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex rfkill_global_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex rfkill_global_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex rfkill_global_mutex.wait_lock irq_context: 0 &dev->mutex &p->pi_lock irq_context: 0 &dev->mutex &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex &rfkill->lock irq_context: 0 nfc_devlist_mutex &c->lock irq_context: 0 nfc_devlist_mutex &____s->seqcount irq_context: 0 &dev->mutex rfkill_global_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 &pool->lock &base->lock irq_context: 0 &pool->lock &base->lock &obj_hash[i].lock irq_context: 0 dma_heap_minors.xa_lock irq_context: 0 subsys mutex#41 irq_context: 0 subsys mutex#41 &k->k_lock irq_context: 0 heap_list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fs_reclaim &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex host_index_ida.xa_lock irq_context: 0 &dev->mutex kthread_create_lock irq_context: 0 &dev->mutex &x->wait irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex 
fs_reclaim irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex pool_lock#2 irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex &wq->mutex irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 &dev->mutex subsys mutex#9 irq_context: 0 &dev->mutex wq_pool_mutex irq_context: 0 &dev->mutex wq_pool_mutex &wq->mutex irq_context: 0 &dev->mutex &md->mutex &domain->mutex &c->lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex &zone->lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex &____s->seqcount irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class tmp_mask_lock irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class tmp_mask_lock vector_lock irq_context: 0 &dev->mutex scsi_sense_cache_mutex irq_context: 0 &dev->mutex scsi_sense_cache_mutex slab_mutex irq_context: 0 &dev->mutex scsi_sense_cache_mutex slab_mutex fs_reclaim irq_context: 0 &dev->mutex scsi_sense_cache_mutex slab_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex scsi_sense_cache_mutex slab_mutex pool_lock#2 irq_context: 0 &dev->mutex scsi_sense_cache_mutex slab_mutex &c->lock irq_context: 0 &dev->mutex scsi_sense_cache_mutex slab_mutex &n->list_lock irq_context: 0 &dev->mutex scsi_sense_cache_mutex slab_mutex pcpu_alloc_mutex irq_context: 0 &dev->mutex scsi_sense_cache_mutex slab_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 &dev->mutex pcpu_alloc_mutex irq_context: 0 &dev->mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 &dev->mutex batched_entropy_u32.lock irq_context: 0 &dev->mutex &n->list_lock irq_context: 0 &dev->mutex batched_entropy_u8.lock irq_context: 0 &dev->mutex kfence_freelist_lock irq_context: 0 &dev->mutex &dev->power.lock &dev->power.lock/1 irq_context: 0 &dev->mutex subsys mutex#42 irq_context: 0 &dev->mutex subsys mutex#43 irq_context: 0 &dev->mutex subsys mutex#43 &k->k_lock irq_context: 0 &dev->mutex attribute_container_mutex irq_context: 0 &dev->mutex &dev->power.lock &dev->power.wait_queue irq_context: 0 &dev->mutex &virtscsi_vq->vq_lock irq_context: 0 &dev->mutex &shost->scan_mutex irq_context: 0 &dev->mutex &shost->scan_mutex fs_reclaim irq_context: 0 &dev->mutex &shost->scan_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &shost->scan_mutex pool_lock#2 irq_context: 0 &dev->mutex &shost->scan_mutex shost->host_lock irq_context: 0 &dev->mutex async_scan_lock irq_context: 0 &dev->mutex async_scan_lock &x->wait#15 irq_context: 0 &dev->mutex async_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->power.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &x->wait#9 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) 
&shost->scan_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex shost->host_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex attribute_container_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex blk_queue_ida.xa_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex pcpu_alloc_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock &q->unused_hctx_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock cpu_hotplug_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock &xa->xa_lock#9 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &set->tag_list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex batched_entropy_u32.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &hctx->lock irq_context: 0 (wq_completion)events_unbound 
(work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &hctx->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock batched_entropy_u32.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &base->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &x->wait#16 irq_context: hardirq &virtscsi_vq->vq_lock irq_context: softirq &x->wait#16 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &base->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 major_names_lock &c->lock irq_context: 0 major_names_lock &pcp->lock &zone->lock irq_context: 0 major_names_lock &zone->lock irq_context: 0 major_names_lock &____s->seqcount irq_context: 0 subsys mutex#44 irq_context: 0 subsys mutex#44 &k->list_lock irq_context: 0 subsys mutex#44 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex (&timer.timer) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &sdev->state_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->mq_freeze_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->mq_freeze_lock percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->mq_freeze_lock percpu_ref_switch_lock rcu_read_lock &q->mq_freeze_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->mq_freeze_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex (&q->timeout) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex (work_completion)(&q->timeout_work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex (work_completion)(&(&q->requeue_work)->work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex (work_completion)(&(&hctx->run_work)->work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex cpu_hotplug_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &xa->xa_lock#9 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) 
&shost->scan_mutex &q->unused_hctx_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex (work_completion)(&sdev->requeue_work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex (work_completion)(&sdev->event_work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &sdev->inquiry_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex (console_sem).lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex console_lock console_srcu console_owner irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex console_lock console_srcu console_owner console_owner_lock irq_context: softirq &x->wait#16 &p->pi_lock irq_context: softirq &x->wait#16 &p->pi_lock &rq->__lock irq_context: softirq &x->wait#16 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &set->tag_list_lock &q->mq_freeze_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &set->tag_list_lock &q->mq_freeze_lock percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &set->tag_list_lock &q->mq_freeze_lock percpu_ref_switch_lock rcu_read_lock &q->mq_freeze_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &set->tag_list_lock percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &set->tag_list_lock &q->mq_freeze_lock &q->mq_freeze_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &pcp->lock &zone->lock 
&____s->seqcount irq_context: 0 nvmf_hosts_mutex irq_context: 0 subsys mutex#45 irq_context: 0 subsys mutex#45 &k->k_lock irq_context: 0 nvmf_transports_rwsem irq_context: 0 subsys mutex#46 irq_context: 0 subsys mutex#46 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex batched_entropy_u32.lock crngs.lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 configfs_dirent_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &sb->s_type->i_lock_key#19 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &s->s_inode_list_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &sb->s_type->i_lock_key#19 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 irq_context: 0 nvmet_config_sem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock rcu_node_0 irq_context: 0 subsys mutex#47 irq_context: 0 subsys mutex#47 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex quarantine_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &sdev->inquiry_mutex &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex fill_pool_map-wait-type-override &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex fill_pool_map-wait-type-override pool_lock irq_context: softirq &x->wait#16 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 
&default_group_class[depth - 1]/2 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 configfs_dirent_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &sb->s_type->i_lock_key#19 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &s->s_inode_list_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &sb->s_type->i_lock_key#19 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 configfs_dirent_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &sb->s_type->i_lock_key#19 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &s->s_inode_list_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 
&default_group_class[depth - 1]#4/2 &sb->s_type->i_lock_key#19 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 configfs_dirent_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &sb->s_type->i_lock_key#19 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &s->s_inode_list_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &sb->s_type->i_lock_key#19 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &default_group_class[depth - 1]#6 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &default_group_class[depth - 1]#6 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &default_group_class[depth - 1]#6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &default_group_class[depth - 1]#6 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &default_group_class[depth - 1]#6 
configfs_dirent_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &default_group_class[depth - 1]#6/2 irq_context: 0 backend_mutex irq_context: 0 scsi_mib_index_lock irq_context: 0 hba_lock irq_context: 0 device_mutex irq_context: 0 device_mutex fs_reclaim irq_context: 0 device_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 device_mutex pool_lock#2 irq_context: 0 &hba->device_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 rcu_read_lock init_fs.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key irq_context: 0 &sb->s_type->i_mutex_key fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key fs_reclaim mmu_notifier_invalidate_range_start irq_context: softirq &x->wait#16 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key &c->lock irq_context: 0 &sb->s_type->i_mutex_key &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key &zone->lock irq_context: 0 &sb->s_type->i_mutex_key &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key rcu_read_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key &dentry->d_lock &wq irq_context: 0 mtd_table_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock cpu_hotplug_lock &rq->__lock irq_context: 0 part_parser_lock irq_context: 0 (kmod_concurrent_max).lock irq_context: 0 &x->wait#17 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &sig->wait_chldexit irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock irq_context: 0 tasklist_lock &sighand->siglock &sig->wait_chldexit &p->pi_lock irq_context: 0 tasklist_lock &sighand->siglock &sig->wait_chldexit &p->pi_lock &cfs_rq->removed.lock irq_context: 0 tasklist_lock &sighand->siglock &sig->wait_chldexit &p->pi_lock &rq->__lock irq_context: 0 tasklist_lock &sighand->siglock &sig->wait_chldexit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tasklist_lock &sighand->siglock &sig->wait_chldexit &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock &____s->seqcount#5 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &prev->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &sighand->siglock &(&sig->stats_lock)->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) css_set_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock irq_context: 0 (wq_completion)events_unbound 
(work_completion)(&sub_info->work) tasklist_lock &sighand->siglock input_pool.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &x->wait#17 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &x->wait#17 &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &x->wait#17 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &x->wait#17 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock fill_pool_map-wait-type-override &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 mtd_table_mutex fs_reclaim irq_context: 0 mtd_table_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex pool_lock#2 irq_context: 0 mtd_table_mutex &x->wait#9 irq_context: 0 mtd_table_mutex &obj_hash[i].lock irq_context: 0 mtd_table_mutex &k->list_lock irq_context: 0 mtd_table_mutex gdp_mutex irq_context: 0 mtd_table_mutex gdp_mutex &k->list_lock irq_context: 0 mtd_table_mutex gdp_mutex fs_reclaim irq_context: 0 mtd_table_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex gdp_mutex pool_lock#2 irq_context: 0 mtd_table_mutex gdp_mutex lock irq_context: 0 mtd_table_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 mtd_table_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 mtd_table_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 mtd_table_mutex lock irq_context: 0 mtd_table_mutex lock kernfs_idr_lock irq_context: 0 mtd_table_mutex &root->kernfs_rwsem irq_context: 0 mtd_table_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 mtd_table_mutex bus_type_sem irq_context: 0 mtd_table_mutex sysfs_symlink_target_lock irq_context: 0 mtd_table_mutex &c->lock irq_context: 0 mtd_table_mutex &____s->seqcount irq_context: 0 mtd_table_mutex &root->kernfs_rwsem irq_context: 0 mtd_table_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 mtd_table_mutex &dev->power.lock irq_context: 0 mtd_table_mutex 
dpm_list_mtx irq_context: 0 mtd_table_mutex req_lock irq_context: 0 mtd_table_mutex &p->pi_lock irq_context: 0 mtd_table_mutex &p->pi_lock &rq->__lock irq_context: 0 mtd_table_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 mtd_table_mutex &x->wait#11 irq_context: 0 mtd_table_mutex &rq->__lock irq_context: 0 mtd_table_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 mtd_table_mutex uevent_sock_mutex irq_context: 0 mtd_table_mutex rcu_read_lock &pool->lock/1 irq_context: 0 mtd_table_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 mtd_table_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 mtd_table_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 mtd_table_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 mtd_table_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 mtd_table_mutex running_helpers_waitq.lock irq_context: 0 mtd_table_mutex subsys mutex#48 irq_context: 0 mtd_table_mutex subsys mutex#48 &k->k_lock irq_context: 0 mtd_table_mutex devtree_lock irq_context: 0 mtd_table_mutex nvmem_ida.xa_lock irq_context: 0 mtd_table_mutex nvmem_cell_mutex irq_context: 0 mtd_table_mutex &k->k_lock irq_context: 0 mtd_table_mutex &dev->mutex &dev->power.lock irq_context: 0 mtd_table_mutex &dev->mutex &k->list_lock irq_context: 0 mtd_table_mutex &dev->mutex &k->k_lock irq_context: 0 mtd_table_mutex subsys mutex#49 irq_context: 0 mtd_table_mutex pin_fs_lock irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 mtd_table_mutex &pcp->lock &zone->lock irq_context: 0 mtd_table_mutex &zone->lock irq_context: 0 mtd_table_mutex (console_sem).lock irq_context: 0 mtd_table_mutex console_lock console_srcu console_owner_lock irq_context: 0 mtd_table_mutex console_lock console_srcu console_owner irq_context: 0 mtd_table_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 mtd_table_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 mtd_table_mutex &obj_hash[i].lock pool_lock irq_context: 0 mtd_table_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq rcu_callback percpu_ref_switch_lock irq_context: 0 mtd_table_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 mtd_table_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 mtd_table_mutex pcpu_alloc_mutex irq_context: 0 mtd_table_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 mtd_table_mutex 
cpu_hotplug_lock irq_context: 0 mtd_table_mutex &n->list_lock irq_context: 0 mtd_table_mutex batched_entropy_u32.lock irq_context: 0 mtd_table_mutex mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex blk_queue_ida.xa_lock irq_context: 0 mtd_table_mutex &q->sysfs_lock irq_context: 0 mtd_table_mutex &q->sysfs_lock &q->unused_hctx_lock irq_context: 0 mtd_table_mutex &q->sysfs_lock mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex &q->sysfs_lock pool_lock#2 irq_context: 0 mtd_table_mutex &q->sysfs_lock &obj_hash[i].lock irq_context: 0 mtd_table_mutex &q->sysfs_lock cpu_hotplug_lock irq_context: 0 mtd_table_mutex &q->sysfs_lock cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 mtd_table_mutex &q->sysfs_lock fs_reclaim irq_context: 0 mtd_table_mutex &q->sysfs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex &q->sysfs_lock &xa->xa_lock#9 irq_context: 0 mtd_table_mutex &set->tag_list_lock irq_context: 0 mtd_table_mutex bio_slab_lock irq_context: 0 mtd_table_mutex percpu_counters_lock irq_context: 0 mtd_table_mutex &sb->s_type->i_lock_key#3 irq_context: 0 mtd_table_mutex &s->s_inode_list_lock irq_context: 0 mtd_table_mutex &xa->xa_lock#8 irq_context: 0 mtd_table_mutex lock &q->queue_lock irq_context: 0 mtd_table_mutex lock &q->queue_lock &blkcg->lock irq_context: 0 mtd_table_mutex &q->mq_freeze_lock irq_context: 0 mtd_table_mutex &q->mq_freeze_lock percpu_ref_switch_lock irq_context: 0 mtd_table_mutex &q->mq_freeze_lock percpu_ref_switch_lock rcu_read_lock &q->mq_freeze_wq irq_context: 0 mtd_table_mutex set->srcu irq_context: 0 mtd_table_mutex percpu_ref_switch_lock irq_context: 0 mtd_table_mutex &q->queue_lock irq_context: 0 mtd_table_mutex &q->queue_lock pool_lock#2 irq_context: 0 mtd_table_mutex &q->mq_freeze_lock &q->mq_freeze_wq irq_context: 0 mtd_table_mutex &q->queue_lock pcpu_lock irq_context: 0 mtd_table_mutex &q->queue_lock &obj_hash[i].lock irq_context: 0 mtd_table_mutex &q->queue_lock percpu_counters_lock irq_context: 0 mtd_table_mutex &bdev->bd_size_lock irq_context: 0 mtd_table_mutex elv_list_lock irq_context: 0 mtd_table_mutex (work_completion)(&(&q->requeue_work)->work) irq_context: 0 mtd_table_mutex (work_completion)(&(&hctx->run_work)->work) irq_context: 0 mtd_table_mutex &q->debugfs_mutex irq_context: 0 mtd_table_mutex &cfs_rq->removed.lock irq_context: 0 mtd_table_mutex subsys mutex#36 irq_context: 0 mtd_table_mutex subsys mutex#36 &k->k_lock irq_context: 0 mtd_table_mutex dev_hotplug_mutex irq_context: 0 mtd_table_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 mtd_table_mutex rcu_read_lock pool_lock#2 irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock fs_reclaim irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock pool_lock#2 irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock lock kernfs_idr_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &root->kernfs_rwsem irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &c->lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &____s->seqcount irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex 
pin_fs_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock fs_reclaim irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock pool_lock#2 irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock lock kernfs_idr_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &root->kernfs_rwsem irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock pcpu_alloc_mutex irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &obj_hash[i].lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->mq_freeze_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->mq_freeze_lock percpu_ref_switch_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->mq_freeze_lock percpu_ref_switch_lock rcu_read_lock &q->mq_freeze_wq irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock set->srcu irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock percpu_ref_switch_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->queue_lock irq_context: 
0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->mq_freeze_lock &q->mq_freeze_wq irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &stats->lock irq_context: 0 mtd_table_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 mtd_table_mutex subsys mutex#37 irq_context: 0 mtd_table_mutex subsys mutex#37 &k->k_lock irq_context: 0 mtd_table_mutex cgwb_lock irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 &zone->lock irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock pool_lock#2 irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 mtd_table_mutex bdi_lock irq_context: 0 mtd_table_mutex inode_hash_lock irq_context: 0 mtd_table_mutex inode_hash_lock &sb->s_type->i_lock_key#3 irq_context: 0 pernet_ops_rwsem krc.lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem krc.lock hrtimer_bases.lock irq_context: 0 pernet_ops_rwsem krc.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 pernet_ops_rwsem krc.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem krc.lock &base->lock irq_context: 0 pernet_ops_rwsem krc.lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex stack_depot_init_mutex irq_context: 0 rtnl_mutex cpu_hotplug_lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex pool_lock#2 irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &wq->mutex irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 rtnl_mutex kthread_create_lock irq_context: 0 rtnl_mutex &p->pi_lock irq_context: 0 rtnl_mutex &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &x->wait irq_context: 0 rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex wq_pool_mutex irq_context: 0 rtnl_mutex wq_pool_mutex &wq->mutex irq_context: 0 rtnl_mutex crngs.lock irq_context: 0 rtnl_mutex net_rwsem irq_context: 0 rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 rtnl_mutex &x->wait#9 irq_context: 0 rtnl_mutex &k->list_lock irq_context: 0 rtnl_mutex gdp_mutex irq_context: 0 rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 rtnl_mutex lock irq_context: 0 rtnl_mutex lock kernfs_idr_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem irq_context: 0 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex bus_type_sem irq_context: 0 rtnl_mutex sysfs_symlink_target_lock irq_context: 0 rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 rtnl_mutex &root->kernfs_rwsem irq_context: 0 rtnl_mutex &dev->power.lock irq_context: 0 rtnl_mutex dpm_list_mtx irq_context: 0 rtnl_mutex uevent_sock_mutex irq_context: 0 rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex 
uevent_sock_mutex nl_table_wait.lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex running_helpers_waitq.lock irq_context: 0 rtnl_mutex subsys mutex#17 irq_context: 0 rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 rtnl_mutex &dir->lock#2 irq_context: 0 rtnl_mutex &cfs_rq->removed.lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 rtnl_mutex uevent_sock_mutex &____s->seqcount irq_context: 0 rtnl_mutex uevent_sock_mutex &pcp->lock &zone->lock irq_context: 0 rtnl_mutex uevent_sock_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex batched_entropy_u8.lock irq_context: 0 rtnl_mutex kfence_freelist_lock irq_context: 0 rtnl_mutex uevent_sock_mutex &zone->lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex dev_hotplug_mutex irq_context: 0 rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 rtnl_mutex dev_base_lock irq_context: 0 rtnl_mutex input_pool.lock irq_context: 0 rtnl_mutex pcpu_alloc_mutex irq_context: 0 rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 (wq_completion)gid-cache-wq irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) &obj_hash[i].lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) pool_lock#2 irq_context: 0 rtnl_mutex batched_entropy_u32.lock irq_context: 0 rtnl_mutex &tbl->lock irq_context: 0 rtnl_mutex sysctl_lock irq_context: 0 rtnl_mutex nl_table_lock irq_context: 0 rtnl_mutex nl_table_wait.lock irq_context: 0 rtnl_mutex rcu_read_lock &bond->stats_lock irq_context: 0 rtnl_mutex lweventlist_lock irq_context: 0 rtnl_mutex lweventlist_lock pool_lock#2 irq_context: 0 rtnl_mutex lweventlist_lock &dir->lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex.wait_lock irq_context: 0 (wq_completion)events 
(linkwatch_work).work rtnl_mutex lweventlist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex lweventlist_lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex lweventlist_lock &dir->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex dev_base_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex proc_subdir_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex proc_subdir_lock irq_context: 0 once_lock irq_context: 0 once_lock crngs.lock irq_context: 0 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&w->work) irq_context: 0 (wq_completion)events (work_completion)(&w->work) cpu_hotplug_lock irq_context: 0 (wq_completion)events (work_completion)(&w->work) cpu_hotplug_lock jump_label_mutex irq_context: 0 (wq_completion)events (work_completion)(&w->work) cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 (wq_completion)events (work_completion)(&w->work) cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 (wq_completion)events (work_completion)(&w->work) &obj_hash[i].lock irq_context: 0 (inet6addr_validator_chain).rwsem irq_context: 0 (inetaddr_validator_chain).rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex stack_depot_init_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex crngs.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex batched_entropy_u32.lock crngs.lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 subsys mutex#50 irq_context: 0 subsys mutex#50 &k->k_lock irq_context: 0 gpio_lookup_lock irq_context: 0 mdio_board_lock irq_context: 0 mode_list_lock irq_context: 0 misc_mtx &cfs_rq->removed.lock irq_context: 0 misc_mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock &rq->__lock irq_context: 0 &dev->mutex stack_depot_init_mutex irq_context: 0 &dev->mutex napi_hash_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex &md->mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex &md->mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex &md->mutex &zone->lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex &rq->__lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex &cfs_rq->removed.lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) 
&shost->scan_mutex kfence_freelist_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &meta->lock irq_context: 0 &dev->mutex cpu_hotplug_lock &md->mutex irq_context: 0 &dev->mutex cpu_hotplug_lock &irq_desc_lock_class irq_context: 0 &dev->mutex cpu_hotplug_lock xps_map_mutex irq_context: 0 &dev->mutex cpu_hotplug_lock xps_map_mutex fs_reclaim irq_context: 0 &dev->mutex cpu_hotplug_lock xps_map_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex cpu_hotplug_lock xps_map_mutex pool_lock#2 irq_context: 0 &dev->mutex cpu_hotplug_lock xps_map_mutex jump_label_mutex irq_context: 0 &dev->mutex cpu_hotplug_lock xps_map_mutex jump_label_mutex text_mutex irq_context: 0 &dev->mutex cpu_hotplug_lock xps_map_mutex jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 &dev->mutex cpu_hotplug_lock xps_map_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex cpu_hotplug_lock xps_map_mutex krc.lock irq_context: 0 &dev->mutex rtnl_mutex irq_context: 0 &dev->mutex rtnl_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex rtnl_mutex &zone->lock irq_context: 0 &dev->mutex rtnl_mutex &____s->seqcount irq_context: 0 &dev->mutex rtnl_mutex pool_lock#2 irq_context: 0 &dev->mutex rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex rtnl_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex rtnl_mutex fs_reclaim irq_context: 0 &dev->mutex rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex rtnl_mutex net_rwsem irq_context: 0 &dev->mutex rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 &dev->mutex rtnl_mutex &x->wait#9 irq_context: 0 &dev->mutex rtnl_mutex &c->lock irq_context: 0 &dev->mutex rtnl_mutex &k->list_lock irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex fs_reclaim irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex &rq->__lock irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex pool_lock#2 irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex lock irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex kobj_ns_type_lock irq_context: 0 &dev->mutex rtnl_mutex lock irq_context: 0 &dev->mutex rtnl_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex rtnl_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex rtnl_mutex bus_type_sem irq_context: 0 &dev->mutex rtnl_mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex rtnl_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex rtnl_mutex &dev->power.lock irq_context: 0 &dev->mutex rtnl_mutex dpm_list_mtx irq_context: 0 &dev->mutex rtnl_mutex uevent_sock_mutex irq_context: 0 &dev->mutex rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 &dev->mutex rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 &dev->mutex rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 &dev->mutex rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex rtnl_mutex 
uevent_sock_mutex nl_table_wait.lock irq_context: 0 &dev->mutex rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex rtnl_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex rtnl_mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex rtnl_mutex &k->k_lock irq_context: 0 &dev->mutex rtnl_mutex subsys mutex#17 irq_context: 0 &dev->mutex rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 &dev->mutex rtnl_mutex &dir->lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex fs_reclaim &rq->__lock irq_context: 0 &dev->mutex rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 &dev->mutex rtnl_mutex uevent_sock_mutex &____s->seqcount irq_context: 0 &dev->mutex rtnl_mutex dev_hotplug_mutex irq_context: 0 &dev->mutex rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 &dev->mutex rtnl_mutex dev_base_lock irq_context: 0 &dev->mutex rtnl_mutex input_pool.lock irq_context: 0 &dev->mutex rtnl_mutex pcpu_alloc_mutex irq_context: 0 &dev->mutex rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 &dev->mutex rtnl_mutex batched_entropy_u32.lock irq_context: 0 &dev->mutex rtnl_mutex &tbl->lock irq_context: 0 &dev->mutex rtnl_mutex sysctl_lock irq_context: 0 &dev->mutex rtnl_mutex nl_table_lock irq_context: 0 &dev->mutex rtnl_mutex nl_table_wait.lock irq_context: hardirq|softirq &irq_desc_lock_class tmp_mask_lock irq_context: hardirq|softirq &irq_desc_lock_class tmp_mask_lock vector_lock irq_context: 0 &dev->mutex rtnl_mutex &rq->__lock irq_context: 0 &dev->mutex rtnl_mutex &cfs_rq->removed.lock irq_context: 0 &dev->mutex cpu_hotplug_lock cpuhp_state_mutex irq_context: hardirq|softirq &irq_desc_lock_class vector_lock irq_context: 0 &dev->mutex lweventlist_lock irq_context: 0 &dev->mutex lweventlist_lock pool_lock#2 irq_context: 0 &dev->mutex lweventlist_lock &dir->lock#2 irq_context: 0 &dev->mutex &base->lock &obj_hash[i].lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &dev->mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&vi->config_work) irq_context: 0 l3mdev_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &set->tag_list_lock &q->mq_freeze_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex cpu_hotplug_lock cpuhp_state_mutex &rq->__lock irq_context: 0 subsys mutex#51 irq_context: 0 subsys mutex#51 &k->k_lock irq_context: 0 compressor_list_lock irq_context: 0 compressor_list_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem hwsim_netgroup_ida.xa_lock irq_context: 0 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 hwsim_radio_lock irq_context: 0 subsys 
mutex#52 irq_context: 0 subsys mutex#52 &k->k_lock irq_context: 0 deferred_probe_mutex irq_context: 0 rtnl_mutex param_lock irq_context: 0 rtnl_mutex param_lock rate_ctrl_mutex irq_context: 0 rtnl_mutex (console_sem).lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx fs_reclaim irq_context: 0 rtnl_mutex &rdev->wiphy.mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &rdev->wiphy.mtx pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &k->list_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex &k->list_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex fs_reclaim irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex lock kernfs_idr_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex &root->kernfs_rwsem irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex kobj_ns_type_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx lock kernfs_idr_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex &rdev->wiphy.mtx bus_type_sem irq_context: 0 rtnl_mutex &rdev->wiphy.mtx sysfs_symlink_target_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &c->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &pcp->lock &zone->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &zone->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &____s->seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &dev->power.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx dpm_list_mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex irq_context: 0 rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex fs_reclaim irq_context: 0 rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex nl_table_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex nl_table_wait.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx running_helpers_waitq.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &k->k_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx subsys mutex#53 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx 
subsys mutex#53 &k->k_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx pin_fs_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &zone->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx nl_table_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx nl_table_wait.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx reg_requests_lock irq_context: 0 rtnl_mutex &base->lock irq_context: 0 rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 rfkill_global_mutex irq_context: 0 rfkill_global_mutex fs_reclaim irq_context: 0 rfkill_global_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rfkill_global_mutex pool_lock#2 irq_context: 0 rfkill_global_mutex &k->list_lock irq_context: 0 rfkill_global_mutex lock irq_context: 0 rfkill_global_mutex lock kernfs_idr_lock irq_context: 0 rfkill_global_mutex &root->kernfs_rwsem irq_context: 0 rfkill_global_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rfkill_global_mutex bus_type_sem irq_context: 0 rfkill_global_mutex sysfs_symlink_target_lock irq_context: 0 rfkill_global_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 rfkill_global_mutex &root->kernfs_rwsem irq_context: 0 rfkill_global_mutex &c->lock irq_context: 0 rfkill_global_mutex &pcp->lock &zone->lock irq_context: 0 rfkill_global_mutex &zone->lock irq_context: 0 rfkill_global_mutex &____s->seqcount irq_context: 0 rfkill_global_mutex &dev->power.lock irq_context: 0 rfkill_global_mutex dpm_list_mtx irq_context: 0 rfkill_global_mutex &rfkill->lock irq_context: 0 rfkill_global_mutex uevent_sock_mutex irq_context: 0 rfkill_global_mutex &obj_hash[i].lock irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock/1 irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 
rfkill_global_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rfkill_global_mutex running_helpers_waitq.lock irq_context: 0 rfkill_global_mutex &k->k_lock irq_context: 0 rfkill_global_mutex subsys mutex#40 irq_context: 0 rfkill_global_mutex subsys mutex#40 &k->k_lock irq_context: 0 rfkill_global_mutex triggers_list_lock irq_context: 0 rfkill_global_mutex leds_list_lock irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rfkill_global_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rfkill_global_led_trigger_work) rfkill_global_mutex rfkill_global_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill_global_led_trigger_work) rfkill_global_mutex &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill_global_led_trigger_work) rfkill_global_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill_global_led_trigger_work) rfkill_global_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rfkill_global_mutex &cfs_rq->removed.lock irq_context: 0 rfkill_global_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx stack_depot_init_mutex irq_context: 0 rtnl_mutex &rdev->wiphy.mtx pcpu_alloc_mutex irq_context: 0 rtnl_mutex &rdev->wiphy.mtx pcpu_alloc_mutex pcpu_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->iflist_mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx net_rwsem irq_context: 0 rtnl_mutex &rdev->wiphy.mtx net_rwsem &list->lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &x->wait#9 irq_context: 0 rtnl_mutex 
&rdev->wiphy.mtx lock kernfs_idr_lock pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx subsys mutex#17 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx subsys mutex#17 &k->k_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &dir->lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx dev_hotplug_mutex irq_context: 0 rtnl_mutex &rdev->wiphy.mtx dev_hotplug_mutex &dev->power.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx dev_base_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx input_pool.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx batched_entropy_u32.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &tbl->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx sysctl_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &fq->lock irq_context: 0 hwsim_radio_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &set->tag_list_lock &q->mq_freeze_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &set->tag_list_lock &q->mq_freeze_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &set->tag_list_lock &q->mq_freeze_lock pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex &c->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex &pcp->lock &zone->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex &zone->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex &____s->seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &c->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &____s->seqcount irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rfkill_global_mutex &obj_hash[i].lock pool_lock irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &x->wait#15 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &k->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex bus_type_sem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex dpm_list_mtx irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex uevent_sock_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex 
rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex running_helpers_waitq.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &dev->power.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &k->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#42 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex device_links_srcu irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->power.lock &dev->power.lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex device_links_srcu irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex fwnode_link_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex fwnode_link_lock &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx quarantine_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex device_links_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex rcu_read_lock pool_lock#2 irq_context: 0 
(wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex probe_waitqueue.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex async_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#42 &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex bio_slab_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex pcpu_alloc_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex percpu_counters_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_lock_key#3 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &s->s_inode_list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &xa->xa_lock#8 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex lock &q->queue_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex lock &q->queue_lock &blkcg->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->mq_freeze_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->mq_freeze_lock percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->mq_freeze_lock percpu_ref_switch_lock rcu_read_lock &q->mq_freeze_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->queue_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->queue_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->mq_freeze_lock &q->mq_freeze_wq irq_context: 0 
(wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->queue_lock pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->queue_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->queue_lock percpu_counters_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->queue_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex sd_index_ida.xa_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex subsys mutex#54 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex subsys mutex#54 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &hctx->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock &hctx->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &x->wait#16 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &base->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx batched_entropy_u8.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx kfence_freelist_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex (&timer.timer) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex (console_sem).lock irq_context: 0 (wq_completion)events_unbound 
(work_completion)(&entry->work) &dev->mutex console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex console_lock console_srcu console_owner irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 console_owner_lock irq_context: 0 console_owner irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex console_owner irq_context: 0 &dev->mutex crngs.lock irq_context: 0 &dev->mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex rcu_read_lock &rq->__lock irq_context: 0 &dev->mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex gdp_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex gdp_mutex &k->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex gdp_mutex fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex gdp_mutex pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex gdp_mutex lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 lock sg_index_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 lock sg_index_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 chrdevs_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 &x->wait#9 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 
&obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 &k->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 gdp_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 gdp_mutex &k->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 gdp_mutex fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 gdp_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 gdp_mutex &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 gdp_mutex &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 gdp_mutex pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 gdp_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 gdp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 gdp_mutex lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 gdp_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 gdp_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 bus_type_sem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys 
mutex#44 &dev->power.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 dpm_list_mtx irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 req_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 &x->wait#11 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 uevent_sock_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 running_helpers_waitq.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &bdev->bd_size_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex elv_list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex (work_completion)(&(&q->requeue_work)->work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex (work_completion)(&(&hctx->run_work)->work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex batched_entropy_u32.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &zone->lock &____s->seqcount irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex &c->lock irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex 
&____s->seqcount irq_context: 0 &dev->mutex rtnl_mutex subsys mutex#55 irq_context: 0 &dev->mutex rtnl_mutex subsys mutex#55 &k->k_lock irq_context: 0 &dev->mutex rtnl_mutex stack_depot_init_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 subsys mutex#56 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 subsys mutex#56 &k->k_lock irq_context: 0 &dev->mutex rtnl_mutex crngs.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 (console_sem).lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 console_lock console_srcu console_owner irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 console_lock console_srcu console_owner &port_lock_key irq_context: 0 &dev->mutex rtnl_mutex &sdata->sec_mtx irq_context: 0 &dev->mutex rtnl_mutex &sdata->sec_mtx &sec->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#44 console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex bsg_minor_ida.xa_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex chrdevs_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex req_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex rtnl_mutex uevent_sock_mutex &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &x->wait#11 irq_context: 0 &dev->mutex rtnl_mutex &local->iflist_mtx#2 irq_context: 0 &dev->mutex hwsim_phys_lock irq_context: 0 &dev->mutex nl_table_lock irq_context: 0 &dev->mutex nl_table_wait.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->debugfs_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex req_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &x->wait#11 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#57 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) 
&shost->scan_mutex subsys mutex#57 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) async_scan_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->power.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex subsys mutex#36 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex subsys mutex#36 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex dev_hotplug_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex pin_fs_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock 
&q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex hwsim_phys_lock fs_reclaim irq_context: 0 &dev->mutex hwsim_phys_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex hwsim_phys_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex 
&q->sysfs_dir_lock &q->sysfs_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock pcpu_alloc_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->mq_freeze_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->mq_freeze_lock percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->mq_freeze_lock percpu_ref_switch_lock rcu_read_lock &q->mq_freeze_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->queue_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->mq_freeze_lock &q->mq_freeze_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &stats->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex subsys mutex#37 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex subsys mutex#37 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex cgwb_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex bdi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex inode_hash_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex inode_hash_lock &sb->s_type->i_lock_key#3 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex bdev_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &hctx->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &hctx->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock batched_entropy_u32.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &x->wait#16 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &base->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound 
(work_completion)(&entry->work) &dev->mutex &disk->open_mutex (&timer.timer) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &q->sysfs_dir_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &q->sysfs_dir_lock &q->sysfs_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &bdev->bd_size_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex free_vmap_area_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex vmap_area_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex init_mm.page_table_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &xa->xa_lock#7 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex lock#4 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &mapping->private_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &dd->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex 
&folio_wait_table[i] irq_context: 0 (wq_completion)kblockd irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock &dd->lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock &virtscsi_vq->vq_lock irq_context: softirq &ret->b_uptodate_lock irq_context: softirq &folio_wait_table[i] irq_context: softirq &folio_wait_table[i] &p->pi_lock irq_context: softirq &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: softirq &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &folio_wait_table[i] &p->pi_lock &cfs_rq->removed.lock irq_context: softirq &folio_wait_table[i] &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex (console_sem).lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex console_owner irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex console_lock console_srcu console_owner irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &sb->s_type->i_lock_key#3 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &s->s_inode_list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex pcpu_alloc_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &x->wait#9 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &k->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex bus_type_sem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex 
&disk->open_mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &dev->power.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex dpm_list_mtx irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex req_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &x->wait#11 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex subsys mutex#36 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex subsys mutex#36 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &xa->xa_lock#8 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &xa->xa_lock#8 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex inode_hash_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex inode_hash_lock &sb->s_type->i_lock_key#3 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex purge_vmap_area_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &sb->s_type->i_lock_key#3 &xa->xa_lock#7 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex lock#5 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &lruvec->lru_lock irq_context: 0 xdomain_lock irq_context: 0 xdomain_lock fs_reclaim irq_context: 0 xdomain_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 xdomain_lock pool_lock#2 irq_context: 0 ioctl_mutex irq_context: 0 address_handler_list_lock irq_context: 0 card_mutex irq_context: 0 &type->i_mutex_dir_key#2 &c->lock irq_context: 0 &type->i_mutex_dir_key#2 &pcp->lock &zone->lock irq_context: 0 &type->i_mutex_dir_key#2 &zone->lock irq_context: 0 &type->i_mutex_dir_key#2 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#2 &____s->seqcount irq_context: 0 subsys mutex#58 irq_context: 0 subsys mutex#58 &k->k_lock irq_context: 0 &sb->s_type->i_mutex_key#3 &cfs_rq->removed.lock irq_context: softirq &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &x->wait#18 irq_context: 0 &x->wait#18 &p->pi_lock irq_context: 0 &x->wait#18 &p->pi_lock 
&cfs_rq->removed.lock irq_context: 0 &txlock irq_context: 0 &txlock &list->lock#3 irq_context: 0 &txlock &txwq irq_context: 0 &x->wait#18 &p->pi_lock &rq->__lock irq_context: 0 &x->wait#18 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iocq[i].lock irq_context: 0 &iocq[i].lock &ktiowq[i] irq_context: 0 &txwq irq_context: 0 &txwq &p->pi_lock irq_context: 0 rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh pool_lock#2 irq_context: 0 subsys mutex#59 irq_context: 0 subsys mutex#59 &k->k_lock irq_context: 0 usb_bus_idr_lock irq_context: 0 usb_bus_idr_lock (usb_notifier_list).rwsem irq_context: 0 &sig->cred_guard_mutex pool_lock irq_context: 0 table_lock irq_context: 0 table_lock &k->list_lock irq_context: 0 table_lock fs_reclaim irq_context: 0 table_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 table_lock &c->lock irq_context: 0 table_lock &pcp->lock &zone->lock irq_context: 0 table_lock &zone->lock irq_context: 0 table_lock &____s->seqcount irq_context: 0 table_lock pool_lock#2 irq_context: 0 table_lock lock irq_context: 0 table_lock lock kernfs_idr_lock irq_context: 0 table_lock &root->kernfs_rwsem irq_context: 0 table_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 table_lock &k->k_lock irq_context: 0 table_lock uevent_sock_mutex irq_context: 0 table_lock &obj_hash[i].lock irq_context: 0 table_lock rcu_read_lock &pool->lock/1 irq_context: 0 table_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 table_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 table_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 table_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 table_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 table_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 table_lock running_helpers_waitq.lock irq_context: 0 table_lock (console_sem).lock irq_context: 0 table_lock console_lock console_srcu console_owner_lock irq_context: 0 table_lock console_lock console_srcu console_owner irq_context: 0 table_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 table_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 table_lock &rq->__lock irq_context: 0 table_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 table_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &cfs_rq->removed.lock irq_context: 0 table_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 table_lock batched_entropy_u8.lock irq_context: 0 table_lock kfence_freelist_lock irq_context: 0 table_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 table_lock &obj_hash[i].lock pool_lock irq_context: 0 table_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 table_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 table_lock lock kernfs_idr_lock &c->lock irq_context: 0 table_lock lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 table_lock lock kernfs_idr_lock &zone->lock irq_context: 0 table_lock lock kernfs_idr_lock &____s->seqcount irq_context: 0 &mm->mmap_lock &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex devtree_lock irq_context: 0 &dev->mutex usb_bus_idr_lock irq_context: 0 &dev->mutex usb_bus_idr_lock fs_reclaim irq_context: 
0 &dev->mutex usb_bus_idr_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock pool_lock#2 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem irq_context: 0 &dev->mutex (usb_notifier_list).rwsem fs_reclaim irq_context: 0 &dev->mutex (usb_notifier_list).rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex (usb_notifier_list).rwsem pool_lock#2 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem pin_fs_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &x->wait#9 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &obj_hash[i].lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &k->list_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex &k->list_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex fs_reclaim irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex pool_lock#2 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex &c->lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex &____s->seqcount irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex (usb_notifier_list).rwsem lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem lock kernfs_idr_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &root->kernfs_rwsem irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex (usb_notifier_list).rwsem bus_type_sem irq_context: 0 &dev->mutex (usb_notifier_list).rwsem sysfs_symlink_target_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem 
&root->kernfs_rwsem irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &dev->power.lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem dpm_list_mtx irq_context: 0 &dev->mutex (usb_notifier_list).rwsem req_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &p->pi_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &rq->__lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &x->wait#11 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem uevent_sock_mutex irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem running_helpers_waitq.lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &k->k_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem subsys mutex#59 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem subsys mutex#59 &k->k_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem mon_lock irq_context: 0 &dev->mutex usb_port_peer_mutex irq_context: 0 &dev->mutex device_state_lock irq_context: 0 &dev->mutex usb_bus_idr_lock mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock hcd_root_hub_lock irq_context: 0 &dev->mutex usb_bus_idr_lock hcd_root_hub_lock hcd_urb_list_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock hcd_root_hub_lock &bh->lock irq_context: 0 &dev->mutex usb_bus_idr_lock hcd_root_hub_lock &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock hcd_root_hub_lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock hcd_root_hub_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &rq->__lock irq_context: softirq &bh->lock irq_context: softirq lock#6 irq_context: softirq lock#6 kcov_remote_lock irq_context: softirq &x->wait#19 irq_context: 0 &dev->mutex usb_bus_idr_lock &x->wait#19 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->power.lock irq_context: 0 &dev->mutex usb_bus_idr_lock device_links_srcu irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->power.lock &dev->power.lock/1 irq_context: 0 &dev->mutex usb_bus_idr_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock (console_sem).lock irq_context: 0 &dev->mutex usb_bus_idr_lock console_lock console_srcu console_owner_lock irq_context: 0 &dev->mutex usb_bus_idr_lock console_lock console_srcu console_owner irq_context: 0 
&dev->mutex usb_bus_idr_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 &dev->mutex usb_bus_idr_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->mutex usb_bus_idr_lock input_pool.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &k->list_lock irq_context: 0 &dev->mutex usb_bus_idr_lock lock irq_context: 0 &dev->mutex usb_bus_idr_lock lock kernfs_idr_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock bus_type_sem irq_context: 0 &dev->mutex usb_bus_idr_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock sysfs_symlink_target_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &k->k_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock dpm_list_mtx irq_context: 0 &dev->mutex usb_bus_idr_lock req_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &x->wait#11 irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem fs_reclaim irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem lock irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem lock kernfs_idr_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock uevent_sock_mutex irq_context: 0 &dev->mutex usb_bus_idr_lock rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex usb_bus_idr_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock running_helpers_waitq.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->power.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &k->list_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &k->k_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex device_links_srcu irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex fwnode_link_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex device_links_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &(&priv->bus_notifier)->rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex fs_reclaim irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start 
irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: softirq rcu_callback &base->lock irq_context: softirq rcu_callback &base->lock &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex set_config_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex devtree_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &x->wait#9 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &dev->power.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex fs_reclaim irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex hcd_root_hub_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex hcd_root_hub_lock hcd_urb_list_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex hcd_root_hub_lock &bh->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex hcd_root_hub_lock &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex hcd_root_hub_lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex hcd_root_hub_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &x->wait#19 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex device_state_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex bus_type_sem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex dpm_list_mtx irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex uevent_sock_mutex irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &dev->power.lock 
irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &k->list_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &k->k_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &new_driver->dynids.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex device_links_srcu irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex fwnode_link_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex device_links_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &(&priv->bus_notifier)->rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex fs_reclaim irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &dev->power.lock &dev->power.lock/1 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex (console_sem).lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex console_lock console_srcu console_owner_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex console_lock console_srcu console_owner irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex hcd_root_hub_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex hcd_root_hub_lock hcd_urb_list_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &dum_hcd->dum->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex hcd_root_hub_lock &bh->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex hcd_root_hub_lock &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex hcd_root_hub_lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex hcd_root_hub_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &x->wait#19 irq_context: 0 
&dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex hcd_root_hub_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex hcd_root_hub_lock hcd_urb_list_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex fs_reclaim irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex &dum_hcd->dum->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex hcd_root_hub_lock &bh->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex hcd_root_hub_lock &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex hcd_root_hub_lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex hcd_root_hub_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex &x->wait#19 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex fs_reclaim irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &x->wait#9 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &k->list_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex bus_type_sem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dpm_list_mtx irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex 
usb_port_peer_mutex &k->k_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx fs_reclaim irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx &dev->power.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx pm_qos_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex component_mutex irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex device_links_srcu irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock &dev->power.lock/1 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx fs_reclaim irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx lock kernfs_idr_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock rcu_read_lock &pool->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock rcu_read_lock 
&pool->lock &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &rq->__lock irq_context: 0 (wq_completion)pm irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &dev->power.lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &dev->power.lock &dev->power.wait_queue irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &lock->wait_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &pool->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex deferred_probe_mutex irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex uevent_sock_mutex irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex probe_waitqueue.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex subsys mutex#60 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &x->wait#9 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex (usb_notifier_list).rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex deferred_probe_mutex irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex 
probe_waitqueue.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &lock->wait_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex hcd_root_hub_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex hcd_root_hub_lock hcd_urb_list_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex fs_reclaim irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &dum_hcd->dum->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex hcd_root_hub_lock &bh->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex hcd_root_hub_lock &p->pi_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex hcd_root_hub_lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex hcd_root_hub_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &x->wait#19 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &base->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &pool->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &x->wait#19 &p->pi_lock irq_context: softirq &x->wait#19 &p->pi_lock &rq->__lock irq_context: softirq &x->wait#19 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex (&timer.timer) irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex hcd_root_hub_lock irq_context: 0 (wq_completion)events_power_efficient 
(work_completion)(&(&hub->init_work)->work) &dev->mutex hcd_root_hub_lock hcd_urb_list_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)usb_hub_wq irq_context: 0 (wq_completion)usb_hub_wq (work_completion)(&hub->events) irq_context: 0 (wq_completion)usb_hub_wq (work_completion)(&hub->events) lock#6 irq_context: 0 (wq_completion)usb_hub_wq (work_completion)(&hub->events) lock#6 kcov_remote_lock irq_context: 0 (wq_completion)usb_hub_wq (work_completion)(&hub->events) &dev->mutex &dev->power.lock irq_context: 0 (wq_completion)usb_hub_wq (work_completion)(&hub->events) &dev->power.lock irq_context: 0 (wq_completion)usb_hub_wq (work_completion)(&hub->events) &dev->power.lock &dev->power.wait_queue irq_context: 0 &dev->mutex usb_bus_idr_lock subsys mutex#60 irq_context: 0 &dev->mutex usb_bus_idr_lock &x->wait#9 irq_context: 0 &dev->mutex usb_bus_idr_lock &hub->irq_urb_lock irq_context: 0 &dev->mutex usb_bus_idr_lock (&hub->irq_urb_retry) irq_context: 0 &dev->mutex usb_bus_idr_lock &base->lock irq_context: 0 &dev->mutex usb_bus_idr_lock hcd_urb_unlink_lock irq_context: softirq usb_kill_urb_queue.lock irq_context: 0 &dev->mutex usb_bus_idr_lock (work_completion)(&hub->tt.clear_work) irq_context: 0 &dev->mutex usb_bus_idr_lock &dum_hcd->dum->lock irq_context: 0 &dev->mutex usb_bus_idr_lock device_state_lock irq_context: 0 &dev->mutex usb_bus_idr_lock hcd_urb_list_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->power.lock &dev->power.wait_queue irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 &zone->lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 &dev->mutex (usb_notifier_list).rwsem lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &zone->lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &c->lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &pcp->lock &zone->lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &zone->lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &pcp->lock &zone->lock 
&____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &rq->__lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock hcd_root_hub_lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex usb_bus_idr_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex lock kernfs_idr_lock pool_lock#2 irq_context: hardirq &vb->stop_update_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &obj_hash[i].lock pool_lock irq_context: softirq lib/debugobjects.c:101 irq_context: softirq lib/debugobjects.c:101 rcu_read_lock &pool->lock irq_context: softirq lib/debugobjects.c:101 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq lib/debugobjects.c:101 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq lib/debugobjects.c:101 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq lib/debugobjects.c:101 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (debug_obj_work).work irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx &____s->seqcount irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex batched_entropy_u8.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex kfence_freelist_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &meta->lock irq_context: 0 &dev->mutex udc_lock irq_context: 0 &dev->mutex subsys mutex#61 irq_context: 0 &dev->mutex subsys mutex#61 &k->k_lock irq_context: 0 &dev->mutex gadget_id_numbers.xa_lock irq_context: 0 (wq_completion)events (work_completion)(&gadget->work) irq_context: 0 (wq_completion)events (work_completion)(&gadget->work) &root->kernfs_rwsem irq_context: 0 (wq_completion)events (work_completion)(&gadget->work) kernfs_notify_lock irq_context: 0 (wq_completion)events (work_completion)(&gadget->work) kernfs_notify_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&gadget->work) kernfs_notify_lock 
rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events kernfs_notify_work irq_context: 0 (wq_completion)events kernfs_notify_work kernfs_notify_lock irq_context: 0 (wq_completion)events kernfs_notify_work &root->kernfs_supers_rwsem irq_context: 0 &dev->mutex subsys mutex#62 irq_context: 0 &dev->mutex gdp_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex gdp_mutex &zone->lock irq_context: 0 &mm->mmap_lock rcu_node_0 irq_context: 0 &dev->mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &n->list_lock irq_context: 0 func_lock irq_context: 0 g_tf_lock irq_context: 0 misc_mtx &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &vhci_hcd->vhci->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex &vhci_hcd->vhci->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &vhci_hcd->vhci->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &c->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &zone->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &vhci_hcd->vhci->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx &c->lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex usb_bus_idr_lock batched_entropy_u8.lock irq_context: 0 &dev->mutex usb_bus_idr_lock kfence_freelist_lock irq_context: 0 &dev->mutex usb_bus_idr_lock lock kernfs_idr_lock &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock lock kernfs_idr_lock &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock 
&dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock quarantine_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &base->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &base->lock &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex lock kernfs_idr_lock &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex lock kernfs_idr_lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex lock kernfs_idr_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex lock kernfs_idr_lock &____s->seqcount irq_context: softirq net/core/link_watch.c:31 irq_context: softirq net/core/link_watch.c:31 rcu_read_lock &pool->lock irq_context: softirq net/core/link_watch.c:31 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq net/core/link_watch.c:31 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq net/core/link_watch.c:31 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq net/core/link_watch.c:31 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem batched_entropy_u8.lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem kfence_freelist_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex lock kernfs_idr_lock &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex lock kernfs_idr_lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex lock kernfs_idr_lock &____s->seqcount irq_context: softirq &(&group->avgs_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq lib/debugobjects.c:101 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex &zone->lock irq_context: softirq &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex usb_bus_idr_lock fs_reclaim 
mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex batched_entropy_u8.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex kfence_freelist_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex lock kernfs_idr_lock &pcp->lock &zone->lock &____s->seqcount irq_context: softirq drivers/char/random.c:251 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) batched_entropy_u64.lock crngs.lock base_crng.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex batched_entropy_u8.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex batched_entropy_u8.lock crngs.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex kfence_freelist_lock irq_context: 0 &dev->mutex usb_bus_idr_lock lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock lock kernfs_idr_lock &zone->lock irq_context: 0 &sig->cred_guard_mutex rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex &dev->mutex probe_waitqueue.lock irq_context: 0 i8042_lock irq_context: 0 &dev->mutex i8042_lock irq_context: 0 &dev->mutex i8042_lock (console_sem).lock irq_context: 0 &dev->mutex i8042_lock console_lock console_srcu console_owner_lock irq_context: 0 &dev->mutex i8042_lock console_lock console_srcu console_owner irq_context: 0 &dev->mutex i8042_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 &dev->mutex i8042_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class ioapic_lock irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock ioapic_lock irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class ioapic_lock i8259A_lock irq_context: 0 &dev->mutex &x->wait#20 irq_context: hardirq i8042_lock &x->wait#20 irq_context: hardirq i8042_lock &x->wait#20 &p->pi_lock irq_context: hardirq i8042_lock &x->wait#20 &p->pi_lock &rq->__lock irq_context: hardirq i8042_lock &x->wait#20 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &fs->lock &dentry->d_lock irq_context: 0 &dev->mutex (&timer.timer) irq_context: 0 &dev->mutex &desc->request_mutex &rq->__lock irq_context: 0 &dev->mutex &desc->request_mutex proc_subdir_lock irq_context: 0 &dev->mutex &desc->request_mutex &ent->pde_unload_lock irq_context: 0 &dev->mutex &desc->request_mutex proc_inum_ida.xa_lock irq_context: 0 &dev->mutex &desc->request_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex &desc->request_mutex pool_lock#2 irq_context: 0 &dev->mutex serio_event_lock irq_context: 0 &dev->mutex serio_event_lock pool_lock#2 irq_context: 0 &dev->mutex serio_event_lock rcu_read_lock &pool->lock irq_context: 0 &dev->mutex serio_event_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &dev->mutex serio_event_lock rcu_read_lock &pool->lock &p->pi_lock 
irq_context: 0 &dev->mutex serio_event_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex serio_event_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long irq_context: 0 (wq_completion)events_long serio_event_work irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex serio_event_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex i8042_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &k->list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &device->physical_node_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &device->physical_node_lock sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &device->physical_node_lock fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &device->physical_node_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &device->physical_node_lock pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &device->physical_node_lock lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &device->physical_node_lock lock kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &device->physical_node_lock &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &device->physical_node_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &c->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &____s->seqcount irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex semaphore->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->power.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex dpm_list_mtx irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex uevent_sock_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex rcu_read_lock &pool->lock/1 irq_context: 0 
(wq_completion)events_long serio_event_work serio_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex running_helpers_waitq.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &dev->power.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &k->list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex subsys mutex#63 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex bus_type_sem irq_context: 0 input_ida.xa_lock irq_context: 0 input_ida.xa_lock pool_lock#2 irq_context: 0 subsys mutex#30 irq_context: 0 subsys mutex#30 &k->k_lock irq_context: 0 input_mutex input_ida.xa_lock irq_context: 0 input_mutex fs_reclaim irq_context: 0 input_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 input_mutex pool_lock#2 irq_context: 0 input_mutex &x->wait#9 irq_context: 0 input_mutex &obj_hash[i].lock irq_context: 0 input_mutex &dev->mutex#2 irq_context: 0 input_mutex chrdevs_lock irq_context: 0 input_mutex &k->list_lock irq_context: 0 input_mutex &c->lock irq_context: 0 input_mutex &pcp->lock &zone->lock irq_context: 0 input_mutex &zone->lock irq_context: 0 input_mutex &____s->seqcount irq_context: 0 input_mutex lock irq_context: 0 input_mutex lock kernfs_idr_lock irq_context: 0 input_mutex &root->kernfs_rwsem irq_context: 0 input_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 input_mutex bus_type_sem irq_context: 0 input_mutex sysfs_symlink_target_lock irq_context: 0 input_mutex &root->kernfs_rwsem irq_context: 0 input_mutex &dev->power.lock irq_context: 0 input_mutex dpm_list_mtx irq_context: 0 input_mutex req_lock irq_context: 0 input_mutex &p->pi_lock irq_context: 0 input_mutex &p->pi_lock &rq->__lock irq_context: 0 input_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 input_mutex &x->wait#11 irq_context: 0 input_mutex uevent_sock_mutex irq_context: 0 input_mutex rcu_read_lock &pool->lock/1 irq_context: 0 input_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 input_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 input_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 input_mutex running_helpers_waitq.lock irq_context: 0 input_mutex &k->k_lock irq_context: 0 input_mutex subsys mutex#30 irq_context: 0 input_mutex subsys mutex#30 &k->k_lock irq_context: 0 input_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 input_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 serio_event_lock irq_context: 0 serio_event_lock pool_lock#2 irq_context: 0 serio_event_lock rcu_read_lock &pool->lock irq_context: 0 serio_event_lock 
rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 serio_event_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 serio_event_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 serio_event_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex device_links_srcu irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex fwnode_link_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex device_links_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &(&priv->bus_notifier)->rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &c->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &____s->seqcount irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &x->wait#9 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &serio->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex &serio->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex i8042_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex &ps2dev->wait irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work 
serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex &base->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex &pool->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &new_driver->dynids.lock irq_context: hardirq &serio->lock irq_context: 0 &dev->mutex rtc_ida.xa_lock irq_context: 0 &dev->mutex rtc_lock irq_context: 0 &dev->mutex &rtc->ops_lock irq_context: 0 &dev->mutex &rtc->ops_lock rtc_lock irq_context: 0 &dev->mutex chrdevs_lock irq_context: 0 &dev->mutex req_lock irq_context: 0 &dev->mutex &x->wait#11 irq_context: 0 &dev->mutex subsys mutex#27 irq_context: 0 &dev->mutex subsys mutex#27 &k->k_lock irq_context: 0 &dev->mutex subsys mutex#27 fs_reclaim irq_context: 0 &dev->mutex subsys mutex#27 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex subsys mutex#27 pool_lock#2 irq_context: 0 &dev->mutex subsys mutex#27 &x->wait#9 irq_context: 0 &dev->mutex subsys mutex#27 &obj_hash[i].lock irq_context: 0 &dev->mutex subsys mutex#27 platform_devid_ida.xa_lock irq_context: 0 &dev->mutex subsys mutex#27 &k->list_lock irq_context: 0 &dev->mutex subsys mutex#27 &____s->seqcount irq_context: 0 &dev->mutex subsys mutex#27 lock irq_context: 0 &dev->mutex subsys mutex#27 lock kernfs_idr_lock irq_context: 0 &dev->mutex subsys mutex#27 &root->kernfs_rwsem irq_context: 0 &dev->mutex subsys mutex#27 &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex subsys mutex#27 bus_type_sem irq_context: 0 &dev->mutex subsys mutex#27 &c->lock irq_context: 0 &dev->mutex subsys mutex#27 sysfs_symlink_target_lock irq_context: 0 &dev->mutex subsys mutex#27 &root->kernfs_rwsem irq_context: 0 &dev->mutex subsys mutex#27 &dev->power.lock irq_context: 0 &dev->mutex subsys mutex#27 dpm_list_mtx irq_context: 0 &dev->mutex subsys mutex#27 &(&priv->bus_notifier)->rwsem irq_context: 0 &dev->mutex subsys mutex#27 uevent_sock_mutex irq_context: 0 &dev->mutex subsys mutex#27 rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex subsys mutex#27 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex subsys mutex#27 rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex subsys mutex#27 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex subsys mutex#27 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex subsys mutex#27 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex subsys mutex#27 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex subsys mutex#27 running_helpers_waitq.lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex &dev->power.lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex &k->list_lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex &k->k_lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex device_links_srcu irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex fwnode_link_lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex device_links_lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex 
&(&priv->bus_notifier)->rwsem irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex fs_reclaim irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex pool_lock#2 irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex deferred_probe_mutex irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex uevent_sock_mutex irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex &obj_hash[i].lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex probe_waitqueue.lock irq_context: 0 &dev->mutex subsys mutex#27 subsys mutex#3 irq_context: 0 &dev->mutex subsys mutex#27 wakeup_ida.xa_lock irq_context: 0 &dev->mutex subsys mutex#27 gdp_mutex irq_context: 0 &dev->mutex subsys mutex#27 gdp_mutex &k->list_lock irq_context: 0 &dev->mutex subsys mutex#27 gdp_mutex fs_reclaim irq_context: 0 &dev->mutex subsys mutex#27 gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex subsys mutex#27 gdp_mutex pool_lock#2 irq_context: 0 &dev->mutex subsys mutex#27 gdp_mutex lock irq_context: 0 &dev->mutex subsys mutex#27 gdp_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex subsys mutex#27 gdp_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex subsys mutex#27 gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &dev->mutex subsys mutex#27 subsys mutex#15 irq_context: 0 &dev->mutex subsys mutex#27 subsys mutex#15 &k->k_lock irq_context: 0 &dev->mutex subsys mutex#27 events_lock irq_context: 0 &dev->mutex subsys mutex#27 rtcdev_lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 g_smscore_deviceslock irq_context: 0 g_smscore_deviceslock fs_reclaim irq_context: 0 g_smscore_deviceslock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 g_smscore_deviceslock pool_lock#2 irq_context: 0 &mm->mmap_lock fs_reclaim &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock fs_reclaim &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex batched_entropy_u8.lock irq_context: 0 &sig->cred_guard_mutex batched_entropy_u8.lock crngs.lock irq_context: 0 
&sig->cred_guard_mutex kfence_freelist_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex (&timer.timer) irq_context: hardirq|softirq &serio->lock &ps2dev->wait irq_context: 0 cx231xx_devlist_mutex irq_context: hardirq|softirq &serio->lock &ps2dev->wait &p->pi_lock irq_context: hardirq|softirq &serio->lock &ps2dev->wait &p->pi_lock &rq->__lock irq_context: hardirq|softirq &serio->lock &ps2dev->wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &k->list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex gdp_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex gdp_mutex &k->list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex gdp_mutex fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex gdp_mutex pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex gdp_mutex lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex bus_type_sem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &dev->power.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex dpm_list_mtx irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex uevent_sock_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex rcu_read_lock 
&pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex running_helpers_waitq.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex subsys mutex#30 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex subsys mutex#30 &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex (console_sem).lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex console_lock console_srcu console_owner irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &dev->mutex#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &k->list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &x->wait#9 irq_context: 0 
(wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &____s->seqcount irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &k->list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access lock kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access bus_type_sem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &c->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &dev->power.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access dpm_list_mtx irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access uevent_sock_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex 
&serio->drv_mutex input_mutex &led_cdev->led_access running_helpers_waitq.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access subsys mutex#64 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access subsys mutex#64 &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access leds_list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock &trig->leddev_list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock rcu_read_lock &dev->event_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock uevent_sock_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 
(wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock running_helpers_waitq.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock &c->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock &____s->seqcount irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex input_ida.xa_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &x->wait#9 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex chrdevs_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &c->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &____s->seqcount irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex 
&root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex bus_type_sem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &dev->power.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex dpm_list_mtx irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex req_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &rq->__lock irq_context: 0 em28xx_devlist_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &x->wait#11 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex uevent_sock_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex running_helpers_waitq.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex subsys mutex#30 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex subsys mutex#30 &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex input_devices_poll_wait.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex 
deferred_probe_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex uevent_sock_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex running_helpers_waitq.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex probe_waitqueue.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &x->wait#9 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &____s->seqcount irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &serio->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex &serio->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex i8042_lock irq_context: 0 (wq_completion)events_long serio_event_work 
serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex &ps2dev->wait irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex &base->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex (&timer.timer) irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex &pool->lock irq_context: 0 uevent_sock_mutex &rq->__lock irq_context: 0 pvr2_context_sync_data.lock irq_context: hardirq &serio->lock &dev->power.lock irq_context: hardirq &serio->lock &dev->event_lock#2 irq_context: 0 &dev->mutex core_lock irq_context: 0 &dev->mutex core_lock fs_reclaim irq_context: 0 &dev->mutex core_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex core_lock pool_lock#2 irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem fs_reclaim irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem pool_lock#2 irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem i2c_dev_list_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &x->wait#9 irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &obj_hash[i].lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem chrdevs_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &k->list_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem gdp_mutex irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem gdp_mutex &k->list_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem gdp_mutex fs_reclaim irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem gdp_mutex pool_lock#2 irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem gdp_mutex &c->lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem gdp_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem gdp_mutex &zone->lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem gdp_mutex &____s->seqcount irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem gdp_mutex lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem gdp_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem gdp_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem lock kernfs_idr_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &root->kernfs_rwsem irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &root->kernfs_rwsem 
&root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem bus_type_sem irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem sysfs_symlink_target_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &root->kernfs_rwsem irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &dev->power.lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem dpm_list_mtx irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem req_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &p->pi_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &rq->__lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &x->wait#11 irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &____s->seqcount irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &pcp->lock &zone->lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &zone->lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem uevent_sock_mutex irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem running_helpers_waitq.lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &k->k_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem subsys mutex#65 irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem subsys mutex#65 &k->k_lock irq_context: 0 &dev->mutex subsys mutex#66 irq_context: 0 &dev->mutex core_lock &k->list_lock irq_context: 0 &dev->mutex core_lock &k->k_lock irq_context: 0 &dev->mutex dvbdev_register_lock irq_context: 0 &dev->mutex dvbdev_register_lock (console_sem).lock irq_context: 0 &dev->mutex dvbdev_register_lock console_lock console_srcu console_owner_lock irq_context: 0 &dev->mutex dvbdev_register_lock console_lock console_srcu console_owner irq_context: 0 &dev->mutex dvbdev_register_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 &dev->mutex dvbdev_register_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->mutex (kmod_concurrent_max).lock irq_context: 0 &dev->mutex &x->wait#17 irq_context: 0 &dev->mutex &dev->mutex &dev->power.lock &dev->power.lock/1 irq_context: 0 &dev->mutex &dev->mutex &dev->power.lock &dev->power.wait_queue irq_context: 0 &dev->mutex &dev->mutex device_links_srcu irq_context: 0 &dev->mutex &dev->mutex fwnode_link_lock irq_context: 0 &dev->mutex &dev->mutex device_links_lock irq_context: 0 &dev->mutex &dev->mutex 
&(&priv->bus_notifier)->rwsem irq_context: 0 &dev->mutex &dev->mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex &dev->mutex fs_reclaim irq_context: 0 &dev->mutex &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &dev->mutex pool_lock#2 irq_context: 0 &dev->mutex &dev->mutex lock irq_context: 0 &dev->mutex &dev->mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex &dev->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex &dev->mutex &dev->devres_lock irq_context: 0 &dev->mutex &dev->mutex deferred_probe_mutex irq_context: 0 &dev->mutex &dev->mutex uevent_sock_mutex irq_context: 0 &dev->mutex &dev->mutex &obj_hash[i].lock irq_context: 0 &dev->mutex &dev->mutex &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex &dev->mutex running_helpers_waitq.lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock lock#4 &obj_hash[i].lock irq_context: 0 &dev->mutex frontend_mutex irq_context: 0 &dev->mutex frontend_mutex fs_reclaim irq_context: 0 &dev->mutex frontend_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex frontend_mutex pool_lock#2 irq_context: 0 &dev->mutex frontend_mutex (console_sem).lock irq_context: 0 &dev->mutex frontend_mutex console_lock console_srcu console_owner_lock irq_context: 0 &dev->mutex frontend_mutex console_lock console_srcu console_owner irq_context: 0 &dev->mutex frontend_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 &dev->mutex frontend_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock fs_reclaim irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock pool_lock#2 irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock minor_rwsem irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &xa->xa_lock#10 irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &mdev->graph_mutex irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &mdev->graph_mutex fs_reclaim irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &mdev->graph_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &mdev->graph_mutex pool_lock#2 irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock (console_sem).lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock console_lock console_srcu console_owner_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock console_lock console_srcu console_owner irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 
&dev->mutex frontend_mutex dvbdev_register_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &x->wait#9 irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &obj_hash[i].lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &k->list_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock gdp_mutex irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock gdp_mutex &k->list_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock gdp_mutex fs_reclaim irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock gdp_mutex pool_lock#2 irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock gdp_mutex lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock gdp_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock gdp_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock lock kernfs_idr_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &root->kernfs_rwsem irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock bus_type_sem irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock sysfs_symlink_target_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &c->lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &____s->seqcount irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &root->kernfs_rwsem irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &dev->power.lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock dpm_list_mtx irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock req_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &p->pi_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &rq->__lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &x->wait#11 irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock uevent_sock_mutex irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock running_helpers_waitq.lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &k->k_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock subsys mutex#67 irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock subsys mutex#67 &k->k_lock irq_context: 0 &dev->mutex 
init_mm.page_table_lock irq_context: 0 &dev->mutex &dmxdev->lock irq_context: 0 &dev->mutex dvbdev_register_lock fs_reclaim irq_context: 0 &dev->mutex dvbdev_register_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex dvbdev_register_lock pool_lock#2 irq_context: 0 &dev->mutex dvbdev_register_lock minor_rwsem irq_context: 0 &dev->mutex dvbdev_register_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex dvbdev_register_lock &zone->lock irq_context: 0 &dev->mutex dvbdev_register_lock &____s->seqcount irq_context: 0 &dev->mutex dvbdev_register_lock &xa->xa_lock#10 irq_context: 0 &dev->mutex dvbdev_register_lock &mdev->graph_mutex irq_context: 0 &dev->mutex dvbdev_register_lock &c->lock irq_context: 0 &dev->mutex dvbdev_register_lock &xa->xa_lock#10 pool_lock#2 irq_context: 0 &dev->mutex dvbdev_register_lock &mdev->graph_mutex fs_reclaim irq_context: 0 &dev->mutex dvbdev_register_lock &mdev->graph_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex dvbdev_register_lock &mdev->graph_mutex pool_lock#2 irq_context: 0 &dev->mutex dvbdev_register_lock &mdev->graph_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex dvbdev_register_lock &x->wait#9 irq_context: 0 &dev->mutex dvbdev_register_lock &obj_hash[i].lock irq_context: 0 &dev->mutex dvbdev_register_lock &k->list_lock irq_context: 0 &dev->mutex dvbdev_register_lock gdp_mutex irq_context: 0 &dev->mutex dvbdev_register_lock gdp_mutex &k->list_lock irq_context: 0 &dev->mutex dvbdev_register_lock lock irq_context: 0 &dev->mutex dvbdev_register_lock lock kernfs_idr_lock irq_context: 0 &dev->mutex dvbdev_register_lock &root->kernfs_rwsem irq_context: 0 &dev->mutex dvbdev_register_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex dvbdev_register_lock bus_type_sem irq_context: 0 &dev->mutex dvbdev_register_lock sysfs_symlink_target_lock irq_context: 0 &dev->mutex dvbdev_register_lock &root->kernfs_rwsem irq_context: 0 &dev->mutex dvbdev_register_lock &dev->power.lock irq_context: 0 &dev->mutex dvbdev_register_lock dpm_list_mtx irq_context: 0 &dev->mutex dvbdev_register_lock req_lock irq_context: 0 &dev->mutex dvbdev_register_lock &p->pi_lock irq_context: 0 &dev->mutex dvbdev_register_lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex dvbdev_register_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex dvbdev_register_lock &rq->__lock irq_context: 0 &dev->mutex dvbdev_register_lock &x->wait#11 irq_context: 0 &dev->mutex dvbdev_register_lock uevent_sock_mutex irq_context: 0 &dev->mutex dvbdev_register_lock rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex dvbdev_register_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex dvbdev_register_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex dvbdev_register_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex dvbdev_register_lock running_helpers_waitq.lock irq_context: 0 &dev->mutex dvbdev_register_lock &k->k_lock irq_context: 0 &dev->mutex dvbdev_register_lock subsys mutex#67 irq_context: 0 &dev->mutex dvbdev_register_lock subsys mutex#67 &k->k_lock irq_context: 0 &dev->mutex dvbdev_register_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex dvbdev_register_lock rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex &dvbdemux->mutex irq_context: 0 &dev->mutex media_devnode_lock irq_context: 0 &dev->mutex subsys mutex#68 irq_context: 0 &dev->mutex videodev_lock irq_context: 0 &dev->mutex 
rcu_read_lock rcu_node_0 irq_context: 0 &dev->mutex subsys mutex#69 irq_context: 0 &dev->mutex subsys mutex#69 &k->k_lock irq_context: 0 &dev->mutex &xa->xa_lock#10 irq_context: 0 &dev->mutex &mdev->graph_mutex irq_context: 0 &dev->mutex &mdev->graph_mutex fs_reclaim irq_context: 0 &dev->mutex &mdev->graph_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &mdev->graph_mutex pool_lock#2 irq_context: 0 &dev->mutex vimc_sensor:393:(&vsensor->hdl)->_lock irq_context: 0 &dev->mutex &v4l2_dev->lock irq_context: 0 &dev->mutex vimc_debayer:578:(&vdebayer->hdl)->_lock irq_context: 0 &dev->mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex vimc_lens:61:(&vlens->hdl)->_lock irq_context: 0 &sig->cred_guard_mutex delayed_uprobe_lock &rq->__lock irq_context: 0 rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &dev->mutex tk_core.seq.seqcount irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1608:(hdl_user_vid)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1614:(hdl_sdtv_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1616:(hdl_loop_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1618:(hdl_fb)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1620:(hdl_vid_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1622:(hdl_vid_out)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1625:(hdl_vbi_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1627:(hdl_vbi_out)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1630:(hdl_radio_rx)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1632:(hdl_radio_tx)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1634:(hdl_sdr_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1636:(hdl_meta_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1638:(hdl_meta_out)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1640:(hdl_tch_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock vivid_ctrls:1620:(hdl_vid_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock fs_reclaim irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock pool_lock#2 irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock &c->lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock &____s->seqcount irq_context: 0 &dev->mutex vivid_ctrls:1608:(hdl_user_vid)->_lock vivid_ctrls:1620:(hdl_vid_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1608:(hdl_user_vid)->_lock fs_reclaim irq_context: 0 &dev->mutex vivid_ctrls:1608:(hdl_user_vid)->_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex vivid_ctrls:1608:(hdl_user_vid)->_lock pool_lock#2 irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock vivid_ctrls:1620:(hdl_vid_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock fs_reclaim irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock pool_lock#2 irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock vivid_ctrls:1620:(hdl_vid_cap)->_lock 
irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock fs_reclaim irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock pool_lock#2 irq_context: 0 &dev->mutex vivid_ctrls:1614:(hdl_sdtv_cap)->_lock vivid_ctrls:1620:(hdl_vid_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1614:(hdl_sdtv_cap)->_lock fs_reclaim irq_context: 0 &dev->mutex vivid_ctrls:1614:(hdl_sdtv_cap)->_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex vivid_ctrls:1614:(hdl_sdtv_cap)->_lock pool_lock#2 irq_context: 0 &dev->mutex vivid_ctrls:1616:(hdl_loop_cap)->_lock vivid_ctrls:1620:(hdl_vid_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1616:(hdl_loop_cap)->_lock fs_reclaim irq_context: 0 &dev->mutex vivid_ctrls:1616:(hdl_loop_cap)->_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex vivid_ctrls:1616:(hdl_loop_cap)->_lock pool_lock#2 irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock vivid_ctrls:1622:(hdl_vid_out)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock vivid_ctrls:1622:(hdl_vid_out)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock vivid_ctrls:1622:(hdl_vid_out)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock vivid_ctrls:1625:(hdl_vbi_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock vivid_ctrls:1625:(hdl_vbi_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1614:(hdl_sdtv_cap)->_lock vivid_ctrls:1625:(hdl_vbi_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1616:(hdl_loop_cap)->_lock vivid_ctrls:1625:(hdl_vbi_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock vivid_ctrls:1627:(hdl_vbi_out)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock vivid_ctrls:1627:(hdl_vbi_out)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock vivid_ctrls:1630:(hdl_radio_rx)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock vivid_ctrls:1630:(hdl_radio_rx)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock vivid_ctrls:1632:(hdl_radio_tx)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock vivid_ctrls:1632:(hdl_radio_tx)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock vivid_ctrls:1634:(hdl_sdr_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock vivid_ctrls:1634:(hdl_sdr_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock vivid_ctrls:1636:(hdl_meta_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock &zone->lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock vivid_ctrls:1636:(hdl_meta_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock vivid_ctrls:1638:(hdl_meta_out)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock vivid_ctrls:1638:(hdl_meta_out)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock vivid_ctrls:1640:(hdl_tch_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock vivid_ctrls:1640:(hdl_tch_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock &obj_hash[i].lock irq_context: 0 &adap->kthread_waitq irq_context: 0 &dev->mutex &p->pi_lock 
&cfs_rq->removed.lock irq_context: 0 &dev->mutex cec_devnode_lock irq_context: 0 &dev->mutex subsys mutex#70 irq_context: 0 &dev->mutex pin_fs_lock irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 &dev->mutex &adap->lock irq_context: 0 &dev->mutex &adap->lock tk_core.seq.seqcount irq_context: 0 &dev->mutex &adap->lock &adap->devnode.lock_fhs irq_context: 0 &dev->cec_xfers_slock irq_context: 0 &dev->kthread_waitq_cec irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock &c->lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock &____s->seqcount irq_context: 0 &dev->mutex vivid_ctrls:1614:(hdl_sdtv_cap)->_lock &rq->__lock irq_context: 0 &dev->mutex vivid_ctrls:1614:(hdl_sdtv_cap)->_lock &c->lock irq_context: 0 &dev->mutex vivid_ctrls:1614:(hdl_sdtv_cap)->_lock &____s->seqcount irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock &zone->lock irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock &c->lock irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock &zone->lock irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock &____s->seqcount irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex vivid_ctrls:1608:(hdl_user_vid)->_lock &c->lock irq_context: 0 &dev->mutex vivid_ctrls:1608:(hdl_user_vid)->_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex vivid_ctrls:1608:(hdl_user_vid)->_lock &zone->lock irq_context: 0 &dev->mutex vivid_ctrls:1608:(hdl_user_vid)->_lock &____s->seqcount irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rcu_node_0 irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 rcu_read_lock &rq->__lock irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 rcu_node_0 irq_context: 0 &dev->mutex lock kernfs_idr_lock &c->lock irq_context: 0 &dev->mutex fs_reclaim &rq->__lock irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 &zone->lock irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 &pcp->lock 
&zone->lock &____s->seqcount irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &dev->mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) &pcp->lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &k->list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex gdp_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex gdp_mutex &k->list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex gdp_mutex fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex gdp_mutex pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex gdp_mutex lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex 
&root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex bus_type_sem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &c->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &dev->power.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex dpm_list_mtx irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex uevent_sock_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex running_helpers_waitq.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex subsys mutex#30 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex subsys mutex#30 &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex (console_sem).lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex console_owner_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex console_owner irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex console_lock console_srcu console_owner irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex 
&dev->mutex &serio->drv_mutex psmouse_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex input_ida.xa_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &c->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &____s->seqcount irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &x->wait#9 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &dev->mutex#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex chrdevs_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &k->list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex bus_type_sem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex 
&dev->mutex &serio->drv_mutex psmouse_mutex input_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &dev->power.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex dpm_list_mtx irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex req_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &x->wait#11 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex uevent_sock_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex running_helpers_waitq.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex subsys mutex#30 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex subsys mutex#30 &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &mousedev->mutex/1 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex input_devices_poll_wait.lock irq_context: hardirq &serio->lock &ps2dev->wait 
&p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &rq->__lock irq_context: softirq mm/vmstat.c:2014 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex vivid_ctrls:1614:(hdl_sdtv_cap)->_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex vivid_ctrls:1614:(hdl_sdtv_cap)->_lock &zone->lock irq_context: 0 &dev->mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 &dev->mutex fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 &dev->mutex fill_pool_map-wait-type-override &zone->lock irq_context: 0 &dev->mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 ptp_clocks_map.xa_lock irq_context: 0 subsys mutex#71 irq_context: 0 subsys mutex#71 &k->k_lock irq_context: 0 pers_lock irq_context: 0 _lock irq_context: 0 dm_bufio_clients_lock irq_context: 0 _ps_lock irq_context: 0 _lock#2 irq_context: 0 _lock#3 irq_context: 0 register_lock#2 irq_context: 0 subsys mutex#72 irq_context: 0 subsys mutex#72 &k->k_lock irq_context: 0 bp_lock irq_context: 0 bp_lock irq_context: 0 gdp_mutex &c->lock irq_context: 0 gdp_mutex &____s->seqcount irq_context: 0 subsys mutex#73 irq_context: 0 subsys mutex#73 &k->k_lock irq_context: softirq (&dsp_spl_tl) irq_context: softirq (&dsp_spl_tl) dsp_lock irq_context: softirq (&dsp_spl_tl) dsp_lock iclock_lock irq_context: softirq (&dsp_spl_tl) dsp_lock iclock_lock tk_core.seq.seqcount irq_context: softirq (&dsp_spl_tl) dsp_lock &obj_hash[i].lock irq_context: softirq (&dsp_spl_tl) dsp_lock &base->lock irq_context: softirq (&dsp_spl_tl) dsp_lock &base->lock &obj_hash[i].lock irq_context: 0 leds_list_lock &led_cdev->trigger_lock irq_context: 0 rtnl_mutex lock#7 irq_context: 0 intf_mutex irq_context: 0 iscsi_transport_lock irq_context: 0 subsys mutex#74 irq_context: 0 subsys mutex#74 &k->k_lock irq_context: 0 &tx_task->waiting irq_context: 0 link_ops_rwsem irq_context: 0 disable_lock irq_context: 0 disable_lock fs_reclaim irq_context: 0 disable_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 disable_lock pool_lock#2 irq_context: 0 disable_lock &x->wait#9 irq_context: 0 disable_lock &obj_hash[i].lock irq_context: 0 disable_lock &k->list_lock irq_context: 0 disable_lock lock irq_context: 0 disable_lock lock kernfs_idr_lock irq_context: 0 disable_lock &root->kernfs_rwsem irq_context: 0 disable_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 disable_lock bus_type_sem irq_context: 0 disable_lock &c->lock irq_context: 0 disable_lock &____s->seqcount irq_context: 0 disable_lock sysfs_symlink_target_lock irq_context: 0 disable_lock &k->k_lock irq_context: 0 disable_lock &root->kernfs_rwsem irq_context: 0 disable_lock &dev->power.lock irq_context: 0 disable_lock dpm_list_mtx irq_context: 0 disable_lock &(&priv->bus_notifier)->rwsem irq_context: 0 disable_lock uevent_sock_mutex irq_context: 0 disable_lock rcu_read_lock &pool->lock/1 irq_context: 0 disable_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 disable_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 disable_lock rcu_read_lock &pool->lock/1 &p->pi_lock 
irq_context: 0 disable_lock running_helpers_waitq.lock irq_context: 0 disable_lock &dev->mutex &dev->power.lock irq_context: 0 disable_lock &dev->mutex &k->list_lock irq_context: 0 disable_lock &dev->mutex &k->k_lock irq_context: 0 disable_lock subsys mutex#3 irq_context: 0 subsys mutex#75 irq_context: 0 subsys mutex#75 &k->k_lock irq_context: 0 service_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &x->wait#17 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq drivers/block/floppy.c:640 irq_context: softirq drivers/block/floppy.c:640 rcu_read_lock &pool->lock/1 irq_context: softirq drivers/block/floppy.c:640 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq drivers/block/floppy.c:640 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 fill_pool_map-wait-type-override &cfs_rq->removed.lock irq_context: 0 fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) &q->blkcg_mutex irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) &q->blkcg_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) &q->blkcg_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) &q->blkcg_mutex &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) &q->blkcg_mutex &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) &q->blkcg_mutex pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) &q->blkcg_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) &xa->xa_lock#9 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) (console_sem).lock irq_context: 0 (wq_completion)events_unbound 
(work_completion)(&entry->work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) pcpu_lock irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) blk_queue_ida.xa_lock irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex proc_subdir_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex &ent->pde_unload_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex proc_inum_ida.xa_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) klist_remove_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &k->k_lock klist_remove_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &pool->lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &pool->lock/1 rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &pool->lock/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &pool->lock/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) (&motor_off_timer[drive]) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &xa->xa_lock#9 
irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->unused_hctx_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock &blkcg->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock &blkcg->lock percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock &blkcg->lock percpu_ref_switch_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock &blkcg->lock percpu_ref_switch_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) (&sq->pending_timer) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) (work_completion)(&td->dispatch_work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->blkcg_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->blkcg_mutex &q->queue_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->blkcg_mutex &q->queue_lock &blkcg->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->blkcg_mutex &q->queue_lock &blkcg->lock (&sq->pending_timer) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->blkcg_mutex &q->queue_lock &blkcg->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->blkcg_mutex &q->queue_lock &blkcg->lock &base->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->blkcg_mutex &q->queue_lock &blkcg->lock percpu_counters_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->blkcg_mutex &q->queue_lock &blkcg->lock pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->blkcg_mutex &q->queue_lock &blkcg->lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &xa->xa_lock#7 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &fsnotify_mark_srcu irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->blkcg_mutex &q->queue_lock &blkcg->lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events 
(work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&pool->mayday_timer) &pool->lock/1 irq_context: softirq (&pool->mayday_timer) &pool->lock/1 wq_mayday_lock irq_context: softirq (&pool->mayday_timer) &obj_hash[i].lock irq_context: softirq (&pool->mayday_timer) &base->lock irq_context: softirq (&pool->mayday_timer) &base->lock &obj_hash[i].lock irq_context: softirq (&pool->mayday_timer) &pool->lock irq_context: softirq (&pool->mayday_timer) &pool->lock wq_mayday_lock irq_context: softirq (&pool->mayday_timer) &pool->lock wq_mayday_lock &p->pi_lock irq_context: softirq (&pool->mayday_timer) &pool->lock wq_mayday_lock &p->pi_lock &rq->__lock irq_context: softirq (&pool->mayday_timer) &pool->lock wq_mayday_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 wq_pool_attach_mutex &p->pi_lock irq_context: 0 wq_pool_attach_mutex &p->pi_lock &rq->__lock irq_context: 0 wq_pool_attach_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 wq_pool_attach_mutex &stopper->lock irq_context: 0 wq_pool_attach_mutex &stop_pi_lock irq_context: 0 wq_pool_attach_mutex &stop_pi_lock &rq->__lock irq_context: 0 wq_pool_attach_mutex &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 wq_pool_attach_mutex &rq->__lock irq_context: 0 &x->wait#7 irq_context: 0 wq_pool_attach_mutex &x->wait#7 irq_context: softirq rcu_callback percpu_ref_switch_waitq.lock irq_context: softirq rcu_callback rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_callback rcu_read_lock pool_lock#2 irq_context: softirq rcu_callback percpu_counters_lock irq_context: softirq rcu_callback pool_lock irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) &obj_hash[i].lock pool_lock irq_context: 0 comedi_drivers_list_lock irq_context: 0 subsys mutex#76 irq_context: 0 subsys mutex#76 &k->k_lock irq_context: 0 snd_ctl_layer_rwsem irq_context: 0 snd_card_mutex irq_context: 0 snd_ioctl_rwsem irq_context: 0 strings irq_context: 0 strings fs_reclaim irq_context: 0 strings fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 strings pool_lock#2 irq_context: 0 register_mutex irq_context: 0 sound_mutex irq_context: 0 sound_mutex fs_reclaim irq_context: 0 sound_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sound_mutex pool_lock#2 irq_context: 0 sound_mutex &k->list_lock irq_context: 0 sound_mutex gdp_mutex irq_context: 0 sound_mutex gdp_mutex &k->list_lock irq_context: 0 sound_mutex lock irq_context: 0 sound_mutex lock kernfs_idr_lock irq_context: 0 sound_mutex &root->kernfs_rwsem irq_context: 0 sound_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sound_mutex bus_type_sem irq_context: 0 sound_mutex sysfs_symlink_target_lock irq_context: 0 sound_mutex &root->kernfs_rwsem irq_context: 0 sound_mutex &dev->power.lock irq_context: 0 sound_mutex dpm_list_mtx irq_context: 0 sound_mutex req_lock irq_context: 0 sound_mutex &p->pi_lock irq_context: 0 sound_mutex &p->pi_lock &rq->__lock irq_context: 0 sound_mutex 
&p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sound_mutex &rq->__lock irq_context: 0 sound_mutex &x->wait#11 irq_context: 0 sound_mutex &obj_hash[i].lock irq_context: 0 sound_mutex uevent_sock_mutex irq_context: 0 sound_mutex rcu_read_lock &pool->lock/1 irq_context: 0 sound_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sound_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sound_mutex running_helpers_waitq.lock irq_context: 0 sound_mutex subsys mutex#76 irq_context: 0 sound_mutex subsys mutex#76 &k->k_lock irq_context: 0 register_mutex#2 irq_context: 0 register_mutex#3 irq_context: 0 register_mutex#3 fs_reclaim irq_context: 0 register_mutex#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 register_mutex#3 pool_lock#2 irq_context: 0 register_mutex#3 sound_mutex irq_context: 0 register_mutex#3 sound_mutex fs_reclaim irq_context: 0 register_mutex#3 sound_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 register_mutex#3 sound_mutex pool_lock#2 irq_context: 0 register_mutex#3 sound_mutex &k->list_lock irq_context: 0 register_mutex#3 sound_mutex gdp_mutex irq_context: 0 register_mutex#3 sound_mutex gdp_mutex &k->list_lock irq_context: 0 register_mutex#3 sound_mutex lock irq_context: 0 register_mutex#3 sound_mutex lock kernfs_idr_lock irq_context: 0 register_mutex#3 sound_mutex &root->kernfs_rwsem irq_context: 0 register_mutex#3 sound_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 register_mutex#3 sound_mutex bus_type_sem irq_context: 0 register_mutex#3 sound_mutex sysfs_symlink_target_lock irq_context: 0 register_mutex#3 sound_mutex &c->lock irq_context: 0 register_mutex#3 sound_mutex &____s->seqcount irq_context: 0 register_mutex#3 sound_mutex &root->kernfs_rwsem irq_context: 0 register_mutex#3 sound_mutex &dev->power.lock irq_context: 0 register_mutex#3 sound_mutex dpm_list_mtx irq_context: 0 register_mutex#3 sound_mutex req_lock irq_context: 0 register_mutex#3 sound_mutex &p->pi_lock irq_context: 0 register_mutex#3 sound_mutex &p->pi_lock &rq->__lock irq_context: 0 register_mutex#3 sound_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 register_mutex#3 sound_mutex &rq->__lock irq_context: 0 register_mutex#3 sound_mutex &cfs_rq->removed.lock irq_context: 0 register_mutex#3 sound_mutex &obj_hash[i].lock irq_context: 0 register_mutex#3 sound_mutex &x->wait#11 irq_context: 0 register_mutex#3 sound_mutex &pcp->lock &zone->lock irq_context: 0 register_mutex#3 sound_mutex &zone->lock irq_context: 0 register_mutex#3 sound_mutex rcu_read_lock pool_lock#2 irq_context: 0 register_mutex#3 sound_mutex uevent_sock_mutex irq_context: 0 register_mutex#3 sound_mutex rcu_read_lock &pool->lock/1 irq_context: 0 register_mutex#3 sound_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 register_mutex#3 sound_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 register_mutex#3 sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 register_mutex#3 sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 register_mutex#3 sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 register_mutex#3 
sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 register_mutex#3 sound_mutex running_helpers_waitq.lock irq_context: 0 register_mutex#3 sound_mutex subsys mutex#76 irq_context: 0 register_mutex#3 sound_mutex subsys mutex#76 &k->k_lock irq_context: 0 info_mutex &c->lock irq_context: 0 info_mutex &____s->seqcount irq_context: 0 register_mutex#3 clients_lock irq_context: 0 &client->ports_mutex irq_context: 0 &client->ports_mutex &client->ports_lock irq_context: 0 register_mutex#4 irq_context: 0 register_mutex#4 fs_reclaim irq_context: 0 register_mutex#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 register_mutex#4 pool_lock#2 irq_context: 0 register_mutex#4 sound_oss_mutex irq_context: 0 register_mutex#4 sound_oss_mutex fs_reclaim irq_context: 0 register_mutex#4 sound_oss_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 register_mutex#4 sound_oss_mutex pool_lock#2 irq_context: 0 register_mutex#4 sound_oss_mutex sound_loader_lock irq_context: 0 register_mutex#4 sound_oss_mutex &x->wait#9 irq_context: 0 register_mutex#4 sound_oss_mutex &obj_hash[i].lock irq_context: 0 register_mutex#4 sound_oss_mutex &k->list_lock irq_context: 0 register_mutex#4 sound_oss_mutex gdp_mutex irq_context: 0 register_mutex#4 sound_oss_mutex gdp_mutex &k->list_lock irq_context: 0 register_mutex#4 sound_oss_mutex lock irq_context: 0 register_mutex#4 sound_oss_mutex lock kernfs_idr_lock irq_context: 0 register_mutex#4 sound_oss_mutex &root->kernfs_rwsem irq_context: 0 register_mutex#4 sound_oss_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 register_mutex#4 sound_oss_mutex bus_type_sem irq_context: 0 register_mutex#4 sound_oss_mutex sysfs_symlink_target_lock irq_context: 0 register_mutex#4 sound_oss_mutex &root->kernfs_rwsem irq_context: 0 register_mutex#4 sound_oss_mutex &c->lock irq_context: 0 register_mutex#4 sound_oss_mutex &____s->seqcount irq_context: 0 register_mutex#4 sound_oss_mutex &dev->power.lock irq_context: 0 register_mutex#4 sound_oss_mutex dpm_list_mtx irq_context: 0 register_mutex#4 sound_oss_mutex req_lock irq_context: 0 register_mutex#4 sound_oss_mutex &p->pi_lock irq_context: 0 register_mutex#4 sound_oss_mutex &p->pi_lock &rq->__lock irq_context: 0 register_mutex#4 sound_oss_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 register_mutex#4 sound_oss_mutex &rq->__lock irq_context: 0 register_mutex#4 sound_oss_mutex &x->wait#11 irq_context: 0 register_mutex#4 sound_oss_mutex uevent_sock_mutex irq_context: 0 register_mutex#4 sound_oss_mutex rcu_read_lock &pool->lock/1 irq_context: 0 register_mutex#4 sound_oss_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 register_mutex#4 sound_oss_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 register_mutex#4 sound_oss_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 register_mutex#4 sound_oss_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 register_mutex#4 sound_oss_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 register_mutex#4 sound_oss_mutex running_helpers_waitq.lock irq_context: 0 register_mutex#4 sound_oss_mutex subsys mutex#76 irq_context: 0 register_mutex#4 sound_oss_mutex subsys mutex#76 &k->k_lock irq_context: 0 register_mutex#4 &c->lock irq_context: 0 register_mutex#4 &____s->seqcount irq_context: 0 register_mutex#4 sound_oss_mutex &pcp->lock &zone->lock irq_context: 0 register_mutex#4 
sound_oss_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 clients_lock irq_context: 0 &client->ports_lock irq_context: 0 &grp->list_mutex/1 irq_context: 0 &grp->list_mutex#2 irq_context: 0 &grp->list_mutex#2 &grp->list_lock irq_context: 0 &grp->list_mutex/1 clients_lock irq_context: 0 &grp->list_mutex/1 &client->ports_lock irq_context: 0 (wq_completion)events async_lookup_work irq_context: 0 (wq_completion)events async_lookup_work fs_reclaim irq_context: 0 (wq_completion)events async_lookup_work fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events async_lookup_work pool_lock#2 irq_context: 0 (wq_completion)events async_lookup_work clients_lock irq_context: 0 (wq_completion)events async_lookup_work &client->ports_lock irq_context: 0 (wq_completion)events async_lookup_work snd_card_mutex irq_context: 0 (wq_completion)events async_lookup_work (kmod_concurrent_max).lock irq_context: 0 (wq_completion)events async_lookup_work &obj_hash[i].lock irq_context: 0 (wq_completion)events async_lookup_work rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events async_lookup_work rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events async_lookup_work rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events async_lookup_work rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events async_lookup_work rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events async_lookup_work rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events async_lookup_work &x->wait#17 irq_context: 0 (wq_completion)events async_lookup_work &pool->lock irq_context: 0 (wq_completion)events async_lookup_work &rq->__lock irq_context: 0 (wq_completion)events async_lookup_work &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &grp->list_mutex/1 register_lock#3 irq_context: 0 &grp->list_mutex/1 fs_reclaim irq_context: 0 &grp->list_mutex/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &grp->list_mutex/1 pool_lock#2 irq_context: 0 &dev->mutex snd_card_mutex irq_context: 0 &dev->mutex &entry->access irq_context: 0 &dev->mutex info_mutex irq_context: 0 &dev->mutex info_mutex proc_subdir_lock irq_context: 0 &dev->mutex info_mutex fs_reclaim irq_context: 0 &dev->mutex info_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex info_mutex pool_lock#2 irq_context: 0 &dev->mutex info_mutex proc_inum_ida.xa_lock irq_context: 0 &dev->mutex info_mutex proc_subdir_lock irq_context: 0 &dev->mutex &card->controls_rwsem irq_context: 0 &dev->mutex &card->controls_rwsem &xa->xa_lock#11 irq_context: 0 &dev->mutex &card->controls_rwsem &xa->xa_lock#11 pool_lock#2 irq_context: 0 &dev->mutex &card->controls_rwsem fs_reclaim irq_context: 0 &dev->mutex &card->controls_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &card->controls_rwsem &xa->xa_lock#11 &c->lock irq_context: 0 &dev->mutex &card->controls_rwsem &xa->xa_lock#11 &____s->seqcount irq_context: 0 &dev->mutex &card->controls_rwsem &card->ctl_files_rwlock irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem irq_context: 0 &dev->mutex &card->controls_rwsem &xa->xa_lock#11 &pcp->lock &zone->lock irq_context: 0 &dev->mutex &card->controls_rwsem &xa->xa_lock#11 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex subsys mutex#76 irq_context: 0 &dev->mutex subsys mutex#76 
&k->k_lock irq_context: 0 &dev->mutex register_mutex#2 irq_context: 0 &dev->mutex register_mutex#2 fs_reclaim irq_context: 0 &dev->mutex register_mutex#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex register_mutex#2 pool_lock#2 irq_context: 0 &dev->mutex register_mutex#2 sound_mutex irq_context: 0 &dev->mutex register_mutex#2 sound_mutex fs_reclaim irq_context: 0 &dev->mutex register_mutex#2 sound_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex register_mutex#2 sound_mutex pool_lock#2 irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &k->list_lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex register_mutex#2 sound_mutex bus_type_sem irq_context: 0 &dev->mutex register_mutex#2 sound_mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &dev->power.lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex dpm_list_mtx irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &c->lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &rq->__lock irq_context: 0 (wq_completion)events async_lookup_work running_helpers_waitq.lock irq_context: 0 (wq_completion)events async_lookup_work rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events async_lookup_work rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events async_lookup_work autoload_work irq_context: 0 (wq_completion)events async_lookup_work &x->wait#10 irq_context: 0 (wq_completion)events async_lookup_work &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events async_lookup_work &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events async_lookup_work &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events autoload_work irq_context: 0 (wq_completion)events autoload_work &k->list_lock irq_context: 0 (wq_completion)events autoload_work &k->k_lock irq_context: 0 (wq_completion)events (work_completion)(&barr->work) irq_context: 0 (wq_completion)events (work_completion)(&barr->work) &x->wait#10 irq_context: 0 (wq_completion)events (work_completion)(&barr->work) &x->wait#10 &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &cfs_rq->removed.lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &____s->seqcount irq_context: 0 &dev->mutex register_mutex#2 sound_mutex req_lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &p->pi_lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &x->wait#11 irq_context: 0 &dev->mutex 
register_mutex#2 sound_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &zone->lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex register_mutex#2 sound_mutex uevent_sock_mutex irq_context: 0 &dev->mutex register_mutex#2 sound_mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex register_mutex#2 sound_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex register_mutex#2 sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &k->k_lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex subsys mutex#76 irq_context: 0 &dev->mutex register_mutex#2 sound_mutex subsys mutex#76 &k->k_lock irq_context: 0 &dev->mutex register_mutex#2 &obj_hash[i].lock irq_context: 0 &dev->mutex register_mutex#2 register_mutex irq_context: 0 &dev->mutex register_mutex#2 &c->lock irq_context: 0 &dev->mutex register_mutex#2 &____s->seqcount irq_context: 0 &dev->mutex register_mutex#2 sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex register_mutex#2 &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex fs_reclaim irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex pool_lock#2 irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex sound_loader_lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &x->wait#9 irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &k->list_lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex bus_type_sem irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &c->lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &____s->seqcount irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &dev->power.lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex dpm_list_mtx irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex req_lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &p->pi_lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &rq->__lock irq_context: 0 &dev->mutex register_mutex#2 
sound_oss_mutex &x->wait#11 irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex uevent_sock_mutex irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &k->k_lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex subsys mutex#76 irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex subsys mutex#76 &k->k_lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex register_mutex#2 strings irq_context: 0 &dev->mutex register_mutex#2 strings fs_reclaim irq_context: 0 &dev->mutex register_mutex#2 strings fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex register_mutex#2 strings pool_lock#2 irq_context: 0 &dev->mutex register_mutex#2 &pcp->lock &zone->lock irq_context: 0 &dev->mutex register_mutex#2 &zone->lock irq_context: 0 &dev->mutex register_mutex#2 rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex register_mutex#2 &entry->access irq_context: 0 &dev->mutex register_mutex#2 info_mutex irq_context: 0 &dev->mutex sound_mutex irq_context: 0 &dev->mutex sound_mutex fs_reclaim irq_context: 0 &dev->mutex sound_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex sound_mutex pool_lock#2 irq_context: 0 &dev->mutex sound_mutex &k->list_lock irq_context: 0 &dev->mutex sound_mutex lock irq_context: 0 &dev->mutex sound_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex sound_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex sound_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex sound_mutex bus_type_sem irq_context: 0 &dev->mutex sound_mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex sound_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex sound_mutex &c->lock irq_context: 0 &dev->mutex sound_mutex &____s->seqcount irq_context: 0 &dev->mutex sound_mutex &dev->power.lock irq_context: 0 &dev->mutex sound_mutex dpm_list_mtx irq_context: 0 &dev->mutex sound_mutex req_lock irq_context: 0 &dev->mutex sound_mutex &p->pi_lock irq_context: 0 &dev->mutex sound_mutex &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex sound_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex sound_mutex &rq->__lock irq_context: 0 &dev->mutex sound_mutex &x->wait#11 irq_context: 0 &dev->mutex sound_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex sound_mutex uevent_sock_mutex irq_context: 0 &dev->mutex sound_mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex sound_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex sound_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex sound_mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex sound_mutex &k->k_lock irq_context: 0 &dev->mutex sound_mutex subsys mutex#76 irq_context: 0 &dev->mutex sound_mutex subsys mutex#76 &k->k_lock irq_context: 0 &dev->mutex &card->controls_rwsem irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem snd_ctl_led_mutex irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem fs_reclaim irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem pool_lock#2 irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &x->wait#9 irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &obj_hash[i].lock irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &k->list_lock irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &c->lock irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &____s->seqcount irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem lock irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem lock kernfs_idr_lock irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &root->kernfs_rwsem irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem bus_type_sem irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &root->kernfs_rwsem irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &dev->power.lock irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem dpm_list_mtx irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &k->k_lock irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem sysfs_symlink_target_lock irq_context: 0 &dev->mutex info_mutex &c->lock irq_context: 0 &dev->mutex info_mutex &____s->seqcount irq_context: 0 &dev->mutex info_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex info_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex sound_oss_mutex irq_context: 0 &dev->mutex sound_oss_mutex fs_reclaim irq_context: 0 &dev->mutex sound_oss_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex sound_oss_mutex pool_lock#2 irq_context: 0 &dev->mutex sound_oss_mutex sound_loader_lock irq_context: 0 &dev->mutex sound_oss_mutex &c->lock irq_context: 0 &dev->mutex sound_oss_mutex &____s->seqcount irq_context: 0 &dev->mutex sound_oss_mutex &x->wait#9 irq_context: 0 &dev->mutex sound_oss_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex sound_oss_mutex &k->list_lock irq_context: 0 &dev->mutex sound_oss_mutex lock irq_context: 0 &dev->mutex sound_oss_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex sound_oss_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex sound_oss_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex sound_oss_mutex bus_type_sem irq_context: 0 &dev->mutex sound_oss_mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex sound_oss_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex sound_oss_mutex &zone->lock irq_context: 0 &dev->mutex sound_oss_mutex lock 
kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex sound_oss_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex sound_oss_mutex &dev->power.lock irq_context: 0 &dev->mutex sound_oss_mutex dpm_list_mtx irq_context: 0 &dev->mutex sound_oss_mutex req_lock irq_context: 0 &dev->mutex sound_oss_mutex &p->pi_lock irq_context: 0 &dev->mutex sound_oss_mutex &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex sound_oss_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex sound_oss_mutex &rq->__lock irq_context: 0 &dev->mutex sound_oss_mutex &cfs_rq->removed.lock irq_context: 0 &dev->mutex sound_oss_mutex &x->wait#11 irq_context: 0 &dev->mutex sound_oss_mutex uevent_sock_mutex irq_context: 0 &dev->mutex sound_oss_mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex sound_oss_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex sound_oss_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex sound_oss_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex sound_oss_mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex sound_oss_mutex &k->k_lock irq_context: 0 &dev->mutex sound_oss_mutex subsys mutex#76 irq_context: 0 &dev->mutex sound_oss_mutex subsys mutex#76 &k->k_lock irq_context: 0 &dev->mutex strings irq_context: 0 &dev->mutex strings fs_reclaim irq_context: 0 &dev->mutex strings fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex strings &c->lock irq_context: 0 &dev->mutex strings &pcp->lock &zone->lock irq_context: 0 &dev->mutex strings &zone->lock irq_context: 0 &dev->mutex strings &____s->seqcount irq_context: 0 &dev->mutex strings pool_lock#2 irq_context: 0 &dev->mutex &card->controls_rwsem fs_reclaim irq_context: 0 &dev->mutex &card->controls_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &card->controls_rwsem pool_lock#2 irq_context: 0 &dev->mutex &card->controls_rwsem &xa->xa_lock#11 &zone->lock irq_context: 0 &dev->mutex &card->controls_rwsem &rq->__lock irq_context: 0 &dev->mutex &card->controls_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex &card->controls_rwsem &cfs_rq->removed.lock irq_context: 0 &dev->mutex &card->controls_rwsem &obj_hash[i].lock irq_context: 0 &dev->mutex &card->controls_rwsem pool_lock#2 irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex register_mutex#2 sound_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &zone->lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &cfs_rq->removed.lock irq_context: 0 &dev->mutex sound_mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &pcp->lock &zone->lock irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &zone->lock irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem 
rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex register_mutex#5 irq_context: 0 &dev->mutex sound_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex sound_mutex &zone->lock irq_context: 0 &dev->mutex sound_mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex register_mutex#3 irq_context: 0 &dev->mutex register_mutex#3 fs_reclaim irq_context: 0 &dev->mutex register_mutex#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex register_mutex#3 pool_lock#2 irq_context: 0 &dev->mutex register_mutex#3 clients_lock irq_context: 0 &dev->mutex clients_lock irq_context: 0 &dev->mutex &client->ports_lock irq_context: 0 &dev->mutex &grp->list_mutex/1 irq_context: 0 &dev->mutex &grp->list_mutex/1 clients_lock irq_context: 0 &dev->mutex &grp->list_mutex/1 &client->ports_lock irq_context: 0 &dev->mutex &client->ports_mutex irq_context: 0 &dev->mutex &client->ports_mutex &client->ports_lock irq_context: 0 &dev->mutex &grp->list_mutex/1 register_lock#3 irq_context: 0 &dev->mutex &grp->list_mutex/1 fs_reclaim irq_context: 0 &dev->mutex &grp->list_mutex/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &grp->list_mutex/1 &c->lock irq_context: 0 &dev->mutex &grp->list_mutex/1 &pcp->lock &zone->lock irq_context: 0 &dev->mutex &grp->list_mutex/1 &zone->lock irq_context: 0 &dev->mutex &grp->list_mutex/1 &____s->seqcount irq_context: 0 &dev->mutex &grp->list_mutex/1 pool_lock#2 irq_context: 0 &dev->mutex sound_oss_mutex &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex sound_oss_mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex sound_oss_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex sound_oss_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex sound_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex sound_mutex lock kernfs_idr_lock &c->lock irq_context: 0 &dev->mutex sound_mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 &dev->mutex sound_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex register_mutex#3 &c->lock irq_context: 0 &dev->mutex register_mutex#3 &pcp->lock &zone->lock irq_context: 0 &dev->mutex register_mutex#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex register_mutex#3 &____s->seqcount irq_context: 0 &dev->mutex sound_oss_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex failover_lock irq_context: 0 llc_sap_list_lock irq_context: 0 llc_sap_list_lock pool_lock#2 irq_context: 0 act_id_mutex irq_context: 0 act_id_mutex fs_reclaim irq_context: 0 act_id_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 act_id_mutex pool_lock#2 irq_context: 0 act_mod_lock irq_context: 0 ife_mod_lock irq_context: 0 pernet_ops_rwsem nf_connlabels_lock irq_context: 0 cls_mod_lock irq_context: 0 ematch_mod_lock irq_context: 0 sock_diag_table_mutex irq_context: 0 nfnl_subsys_acct irq_context: 0 nfnl_subsys_queue irq_context: 0 nfnl_subsys_ulog irq_context: 0 nf_log_mutex irq_context: 0 nfnl_subsys_osf irq_context: 0 nf_sockopt_mutex irq_context: 0 nfnl_subsys_ctnetlink irq_context: 0 nfnl_subsys_ctnetlink_exp irq_context: 0 pernet_ops_rwsem nf_ct_ecache_mutex irq_context: 0 nfnl_subsys_cttimeout irq_context: 0 nfnl_subsys_cthelper irq_context: 0 nf_ct_helper_mutex irq_context: 0 rcu_state.exp_mutex &rnp->exp_wq[1] 
irq_context: 0 nf_conntrack_expect_lock irq_context: 0 net_rwsem irq_context: 0 nf_conntrack_mutex irq_context: 0 pernet_ops_rwsem nf_log_mutex irq_context: 0 pernet_ops_rwsem batched_entropy_u8.lock irq_context: 0 pernet_ops_rwsem kfence_freelist_lock irq_context: 0 nf_ct_nat_helpers_mutex irq_context: 0 nfnl_subsys_nftables irq_context: 0 nfnl_subsys_nftcompat irq_context: 0 masq_mutex irq_context: 0 masq_mutex pernet_ops_rwsem irq_context: 0 masq_mutex pernet_ops_rwsem rtnl_mutex irq_context: 0 masq_mutex (inetaddr_chain).rwsem irq_context: 0 masq_mutex inet6addr_chain.lock irq_context: 0 &xt[i].mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex &tn->lock irq_context: 0 subsys mutex#77 irq_context: 0 subsys mutex#77 &k->k_lock irq_context: 0 nfnl_subsys_ipset irq_context: 0 ip_set_type_mutex irq_context: 0 pernet_ops_rwsem ipvs->est_mutex irq_context: 0 pernet_ops_rwsem ipvs->est_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem ipvs->est_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pcpu_alloc_mutex irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 pernet_ops_rwsem &base->lock irq_context: 0 pernet_ops_rwsem &base->lock &obj_hash[i].lock irq_context: 0 ip_vs_sched_mutex irq_context: 0 (wq_completion)events (debug_obj_work).work pool_lock#2 irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex pool_lock#2 irq_context: 0 ip_vs_pe_mutex irq_context: 0 tunnel4_mutex irq_context: 0 pernet_ops_rwsem net_generic_ids.xa_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 xfrm4_protocol_mutex irq_context: 0 &xt[i].mutex fs_reclaim irq_context: 0 &xt[i].mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &xt[i].mutex pool_lock#2 irq_context: 0 inet_diag_table_mutex irq_context: 0 xfrm_km_lock irq_context: 0 xfrm_translator_lock irq_context: 0 xfrm6_protocol_mutex irq_context: 0 tunnel6_mutex irq_context: 0 xfrm_if_cb_lock irq_context: 0 inetsw6_lock irq_context: 0 &hashinfo->lock#2 irq_context: 0 pernet_ops_rwsem &hashinfo->lock#2 irq_context: 0 pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 
pernet_ops_rwsem rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &zone->lock irq_context: 0 (wq_completion)ipv6_addrconf irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex &pool->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (crypto_chain).rwsem irq_context: 0 (crypto_chain).rwsem fs_reclaim irq_context: 0 (crypto_chain).rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (crypto_chain).rwsem pool_lock#2 irq_context: 0 (crypto_chain).rwsem kthread_create_lock irq_context: 0 (crypto_chain).rwsem &p->pi_lock irq_context: 0 (crypto_chain).rwsem &p->pi_lock &rq->__lock irq_context: 0 (crypto_chain).rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (crypto_chain).rwsem &x->wait irq_context: 0 (crypto_chain).rwsem &rq->__lock irq_context: 0 (crypto_chain).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (crypto_chain).rwsem &obj_hash[i].lock irq_context: 0 (crypto_chain).rwsem &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &x->wait#21 irq_context: 0 &x->wait#21 &p->pi_lock irq_context: 0 &x->wait#21 &p->pi_lock &rq->__lock irq_context: 0 &x->wait#21 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->alloc_lock &x->wait irq_context: 0 (crypto_chain).rwsem &c->lock irq_context: 0 (crypto_chain).rwsem &pcp->lock &zone->lock irq_context: 0 (crypto_chain).rwsem &zone->lock irq_context: 0 (crypto_chain).rwsem &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 stp_proto_mutex irq_context: 0 stp_proto_mutex llc_sap_list_lock irq_context: 0 stp_proto_mutex llc_sap_list_lock batched_entropy_u8.lock 
irq_context: 0 stp_proto_mutex llc_sap_list_lock kfence_freelist_lock irq_context: 0 switchdev_notif_chain.lock irq_context: 0 (switchdev_blocking_notif_chain).rwsem irq_context: 0 br_ioctl_mutex irq_context: 0 nf_ct_proto_mutex irq_context: 0 ebt_mutex irq_context: 0 ebt_mutex fs_reclaim irq_context: 0 ebt_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 ebt_mutex pool_lock#2 irq_context: 0 dsa_tag_drivers_lock irq_context: 0 rtnl_mutex &tn->lock irq_context: 0 rtnl_mutex lock kernfs_idr_lock &c->lock irq_context: 0 rtnl_mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 rtnl_mutex batched_entropy_u32.lock crngs.lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &dentry->d_lock &wq &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &dentry->d_lock &wq &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &dentry->d_lock &wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 protocol_list_lock irq_context: 0 linkfail_lock irq_context: 0 rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 rtnl_mutex lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 rtnl_mutex lock kernfs_idr_lock &zone->lock irq_context: 0 rtnl_mutex dev_hotplug_mutex &rq->__lock irq_context: 0 rtnl_mutex dev_hotplug_mutex &cfs_rq->removed.lock irq_context: 0 rtnl_mutex dev_hotplug_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_hotplug_mutex pool_lock#2 irq_context: 0 rtnl_mutex lock kernfs_idr_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rose_neigh_list_lock irq_context: 0 proto_tab_lock#2 irq_context: 0 bt_proto_lock irq_context: 0 bt_proto_lock pool_lock#2 irq_context: 0 bt_proto_lock &dir->lock irq_context: 0 bt_proto_lock &obj_hash[i].lock irq_context: 0 bt_proto_lock chan_list_lock irq_context: 0 bt_proto_lock l2cap_sk_list.lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP slock-AF_BLUETOOTH-BTPROTO_L2CAP irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP chan_list_lock irq_context: 0 slock-AF_BLUETOOTH-BTPROTO_L2CAP irq_context: 0 rfcomm_wq.lock irq_context: 0 rfcomm_mutex irq_context: 0 auth_domain_lock irq_context: 0 registered_mechs_lock irq_context: 0 (crypto_chain).rwsem rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 reading_mutex &x->wait#12 irq_context: 0 reading_mutex &rq->__lock irq_context: 0 reading_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: hardirq &x->wait#12 &p->pi_lock irq_context: 0 atm_dev_notify_chain.lock irq_context: 0 genl_mutex irq_context: 0 proto_tab_lock#3 irq_context: 0 
vlan_ioctl_mutex irq_context: 0 pernet_ops_rwsem (console_sem).lock irq_context: 0 pernet_ops_rwsem console_lock console_srcu console_owner_lock irq_context: 0 pernet_ops_rwsem console_lock console_srcu console_owner irq_context: 0 pernet_ops_rwsem console_lock console_srcu console_owner &port_lock_key irq_context: 0 pernet_ops_rwsem console_lock console_srcu console_owner console_owner_lock irq_context: hardirq allocation_wait.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_read_lock &ndev->lock irq_context: 0 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rds_info_lock irq_context: 0 rds_trans_sem irq_context: 0 rds_trans_sem (console_sem).lock irq_context: 0 rds_trans_sem console_lock console_srcu console_owner_lock irq_context: 0 rds_trans_sem console_lock console_srcu console_owner irq_context: 0 rds_trans_sem console_lock console_srcu console_owner &port_lock_key irq_context: 0 rds_trans_sem console_lock console_srcu console_owner console_owner_lock irq_context: 0 &id_priv->lock irq_context: 0 lock#7 irq_context: 0 lock#7 fs_reclaim irq_context: 0 lock#7 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 lock#7 pool_lock#2 irq_context: 0 lock#7 &xa->xa_lock#12 irq_context: 0 lock#7 &xa->xa_lock#12 pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-slock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &zone->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &____s->seqcount irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &zone->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &____s->seqcount irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &h->lhash2[i].lock irq_context: 0 rds_trans_sem &rq->__lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock 
irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &wq->mutex irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 pernet_ops_rwsem wq_pool_mutex irq_context: 0 pernet_ops_rwsem wq_pool_mutex &wq->mutex irq_context: 0 pernet_ops_rwsem pcpu_lock irq_context: 0 pernet_ops_rwsem &list->lock#4 irq_context: 0 pernet_ops_rwsem &dir->lock#2 irq_context: 0 pernet_ops_rwsem ptype_lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &c->lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &zone->lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rcu_read_lock rhashtable_bucket irq_context: 0 pernet_ops_rwsem k-clock-AF_TIPC irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC k-slock-AF_TIPC irq_context: 0 pernet_ops_rwsem k-slock-AF_TIPC irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &pnettable->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex smc_ib_devices.mutex irq_context: 0 smc_wr_rx_hash_lock irq_context: 0 v9fs_trans_lock irq_context: 0 pernet_ops_rwsem &this->receive_lock irq_context: 0 &x->wait#17 &p->pi_lock irq_context: 0 &x->wait#17 &p->pi_lock &rq->__lock irq_context: 0 &x->wait#17 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &x->wait#17 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 lowpan_nhc_lock irq_context: 0 ovs_mutex irq_context: 0 pernet_ops_rwsem once_lock irq_context: 0 pernet_ops_rwsem once_lock crngs.lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex &obj_hash[i].lock irq_context: 0 
pernet_ops_rwsem nf_ct_proto_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex &zone->lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex &obj_hash[i].lock irq_context: 0 misc_mtx &p->pi_lock &cfs_rq->removed.lock irq_context: 0 misc_mtx rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &dev->mutex &k->list_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &dev->mutex &k->k_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &dev->mutex &dev->power.lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up subsys mutex#78 irq_context: 0 cpu_hotplug_lock cpuhp_state-up &base->lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &base->lock &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cpu_hotplug_lock static_call_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 tasklist_lock &n->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &n->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock batched_entropy_u8.lock irq_context: 0 &mm->mmap_lock kfence_freelist_lock irq_context: 0 &sig->cred_guard_mutex &dentry->d_lock &dentry->d_lock/1 irq_context: 0 &sig->cred_guard_mutex &dentry->d_lock/1 irq_context: 0 kernfs_idr_lock &obj_hash[i].lock irq_context: 0 kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 fs_reclaim &cfs_rq->removed.lock irq_context: 0 fs_reclaim &obj_hash[i].lock irq_context: 0 kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: softirq &(&gc_work->dwork)->timer irq_context: softirq &(&gc_work->dwork)->timer rcu_read_lock &pool->lock irq_context: softirq &(&gc_work->dwork)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq 
&(&gc_work->dwork)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&gc_work->dwork)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&gc_work->dwork)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_node_0 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_threadgroup_rwsem &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &obj_hash[i].lock irq_context: 0 cgroup_threadgroup_rwsem &cfs_rq->removed.lock irq_context: 0 cgroup_threadgroup_rwsem &obj_hash[i].lock irq_context: 0 cgroup_threadgroup_rwsem pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient 
(work_completion)(&(&gc_work->dwork)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq &(&ipvs->defense_work)->timer irq_context: softirq &(&ipvs->defense_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&ipvs->defense_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &s->s_inode_list_lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &ipvs->dropentry_lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &ipvs->droppacket_lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &ipvs->securetcp_lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &base->lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &cfs_rq->removed.lock irq_context: 0 &root->kernfs_rwsem &base->lock irq_context: 0 &root->kernfs_rwsem &base->lock &obj_hash[i].lock irq_context: 0 &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&net->can.stattimer) irq_context: softirq (&net->can.stattimer) &obj_hash[i].lock irq_context: softirq (&net->can.stattimer) &base->lock irq_context: softirq (&net->can.stattimer) &base->lock &obj_hash[i].lock irq_context: 0 &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 &root->kernfs_rwsem &root->kernfs_iattr_rwsem &cfs_rq->removed.lock irq_context: 0 &root->kernfs_rwsem &root->kernfs_iattr_rwsem &obj_hash[i].lock irq_context: 0 &mm->mmap_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &base->lock irq_context: 0 &sig->cred_guard_mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &obj_hash[i].lock pool_lock irq_context: softirq &(&tbl->managed_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&tbl->managed_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 fs_reclaim 
mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&vblank->disable_timer) irq_context: softirq (&vblank->disable_timer) &dev->vbl_lock irq_context: softirq (&vblank->disable_timer) &dev->vbl_lock &dev->vblank_time_lock irq_context: softirq (&vblank->disable_timer) &dev->vbl_lock &dev->vblank_time_lock hrtimer_bases.lock irq_context: softirq (&vblank->disable_timer) &dev->vbl_lock &dev->vblank_time_lock hrtimer_bases.lock &obj_hash[i].lock irq_context: softirq (&vblank->disable_timer) &dev->vbl_lock &dev->vblank_time_lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events drain_vmap_work irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock purge_vmap_area_lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock pool_lock#2 irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock init_mm.page_table_lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock init_mm.page_table_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock init_mm.page_table_lock &obj_hash[i].lock irq_context: 0 &root->kernfs_rwsem quarantine_lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock &rq->__lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock pool_lock#2 irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&q->timeout) irq_context: softirq (&q->timeout) rcu_read_lock &pool->lock irq_context: softirq (&q->timeout) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&q->timeout) rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq (&q->timeout) rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq (&q->timeout) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&q->timeout) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)kblockd (work_completion)(&q->timeout_work) irq_context: 0 uevent_sock_mutex &cfs_rq->removed.lock irq_context: 0 uevent_sock_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &rcu_state.gp_wq 
&p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback quarantine_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_node_0 irq_context: 0 cb_lock genl_mutex &c->lock irq_context: 0 cb_lock genl_mutex &pcp->lock &zone->lock irq_context: 0 cb_lock genl_mutex &zone->lock irq_context: 0 cb_lock genl_mutex &____s->seqcount irq_context: 0 lock map_idr_lock irq_context: 0 lock map_idr_lock pool_lock#2 irq_context: 0 &sb->s_type->i_lock_key#15 &dentry->d_lock irq_context: 0 purge_vmap_area_lock irq_context: 0 lock prog_idr_lock irq_context: 0 lock prog_idr_lock pool_lock#2 irq_context: 0 bpf_lock irq_context: 0 rcu_read_lock_trace fs_reclaim irq_context: 0 rcu_read_lock_trace fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rcu_read_lock_trace pool_lock#2 irq_context: 0 rcu_read_lock_trace &obj_hash[i].lock irq_context: 0 rcu_read_lock_trace &c->lock irq_context: 0 rcu_read_lock_trace &____s->seqcount irq_context: 0 rcu_read_lock_trace lock irq_context: 0 rcu_read_lock_trace lock btf_idr_lock irq_context: 0 rcu_read_lock_trace lock btf_idr_lock pool_lock#2 irq_context: 0 rcu_read_lock_trace &newf->file_lock irq_context: 0 rcu_read_lock_trace &sb->s_type->i_lock_key#15 irq_context: 0 rcu_read_lock_trace &sb->s_type->i_lock_key#15 &dentry->d_lock irq_context: 0 rcu_read_lock_trace free_vmap_area_lock irq_context: 0 rcu_read_lock_trace vmap_area_lock irq_context: 0 rcu_read_lock_trace &pcp->lock &zone->lock irq_context: 0 rcu_read_lock_trace &zone->lock irq_context: 0 rcu_read_lock_trace lock map_idr_lock irq_context: 0 rcu_read_lock_trace rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock_trace &map->freeze_mutex irq_context: 0 key_types_sem irq_context: 0 key_types_sem asymmetric_key_parsers_sem irq_context: 0 key_types_sem asymmetric_key_parsers_sem fs_reclaim irq_context: 0 key_types_sem asymmetric_key_parsers_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 key_types_sem asymmetric_key_parsers_sem pool_lock#2 irq_context: 0 key_types_sem asymmetric_key_parsers_sem crypto_alg_sem irq_context: 0 key_types_sem asymmetric_key_parsers_sem &obj_hash[i].lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem crypto_alg_sem irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem fs_reclaim irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem pool_lock#2 irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem kthread_create_lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &p->pi_lock irq_context: 0 key_types_sem 
asymmetric_key_parsers_sem (crypto_chain).rwsem &p->pi_lock &cfs_rq->removed.lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &p->pi_lock &rq->__lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &rq->__lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &x->wait irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &obj_hash[i].lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem &x->wait#21 irq_context: 0 key_types_sem asymmetric_key_parsers_sem &base->lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem &base->lock &obj_hash[i].lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem &rq->__lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 key_types_sem asymmetric_key_parsers_sem (&timer.timer) irq_context: 0 key_types_sem asymmetric_key_parsers_sem &c->lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem &____s->seqcount irq_context: 0 key_types_sem &type->lock_class irq_context: 0 key_types_sem &type->lock_class fs_reclaim irq_context: 0 key_types_sem &type->lock_class fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 key_types_sem &type->lock_class pool_lock#2 irq_context: 0 key_types_sem &type->lock_class key_user_lock irq_context: 0 key_types_sem &type->lock_class crngs.lock irq_context: 0 key_types_sem &type->lock_class key_serial_lock irq_context: 0 key_types_sem &type->lock_class key_construction_mutex irq_context: 0 key_types_sem &type->lock_class key_construction_mutex &obj_hash[i].lock irq_context: 0 key_types_sem &type->lock_class key_construction_mutex pool_lock#2 irq_context: 0 key_types_sem &type->lock_class ima_keys_lock irq_context: 0 key_types_sem &obj_hash[i].lock irq_context: 0 key_types_sem pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#3 batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#3 kfence_freelist_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &n->list_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex crypto_alg_sem irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock fs_reclaim irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock &c->lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock &n->list_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock &pcp->lock &zone->lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock &zone->lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock &____s->seqcount irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock pool_lock#2 irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock free_vmap_area_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock vmap_area_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock init_mm.page_table_lock irq_context: 0 slab_mutex lock irq_context: 0 slab_mutex lock kernfs_idr_lock irq_context: 0 slab_mutex &root->kernfs_rwsem irq_context: 0 slab_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 
slab_mutex &k->list_lock irq_context: 0 slab_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 slab_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 slab_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 slab_mutex lock kernfs_idr_lock &c->lock irq_context: 0 slab_mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 slab_mutex lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 slab_mutex lock kernfs_idr_lock &zone->lock irq_context: 0 slab_mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 slab_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 slab_mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) &base->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) &base->lock &obj_hash[i].lock irq_context: 0 slab_mutex lock kernfs_idr_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pcpu_drain_mutex &pcp->lock irq_context: 0 pcpu_drain_mutex &pcp->lock &zone->lock irq_context: 0 pcpu_drain_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: softirq mm/memcontrol.c:589 rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: softirq &(&ipvs->defense_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&ipvs->defense_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&ipvs->defense_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#3 batched_entropy_u8.lock crngs.lock irq_context: 0 &sb->s_type->i_mutex_key#3 batched_entropy_u8.lock crngs.lock base_crng.lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC k-slock-AF_RXRPC irq_context: 0 pernet_ops_rwsem k-slock-AF_RXRPC irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex crngs.lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &sb->s_type->i_lock_key#8 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &dir->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 &table->hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 &table->hash[i].lock k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 &table->hash[i].lock &table->hash2[i].lock irq_context: 0 pernet_ops_rwsem 
k-sk_lock-AF_RXRPC &rxnet->local_mutex k-slock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &rq->__lock irq_context: 0 (wq_completion)events netstamp_work irq_context: 0 (wq_completion)events netstamp_work cpu_hotplug_lock irq_context: 0 (wq_completion)events netstamp_work cpu_hotplug_lock jump_label_mutex irq_context: 0 (wq_completion)events netstamp_work cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 (wq_completion)events netstamp_work cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex kthread_create_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &x->wait irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &zone->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &x->wait#22 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &x->wait#22 irq_context: 0 &x->wait#22 &p->pi_lock irq_context: 0 &x->wait#22 &p->pi_lock &rq->__lock irq_context: 0 &x->wait#22 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &local->services_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC fs_reclaim irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC pool_lock#2 irq_context: 0 pernet_ops_rwsem &rxnet->conn_lock irq_context: 0 pernet_ops_rwsem &call->waitq irq_context: 0 pernet_ops_rwsem &rx->call_lock irq_context: 0 pernet_ops_rwsem &rxnet->call_lock irq_context: 0 bio_slab_lock slab_mutex &____s->seqcount irq_context: 0 bio_slab_lock slab_mutex 
&root->kernfs_rwsem irq_context: 0 bio_slab_lock slab_mutex &k->list_lock irq_context: 0 bio_slab_lock slab_mutex lock irq_context: 0 bio_slab_lock slab_mutex lock kernfs_idr_lock irq_context: 0 bio_slab_lock slab_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 bio_slab_lock slab_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 bio_slab_lock &pcp->lock &zone->lock irq_context: 0 bio_slab_lock &zone->lock irq_context: 0 bio_slab_lock &____s->seqcount irq_context: 0 bio_slab_lock rcu_read_lock pool_lock#2 irq_context: 0 bio_slab_lock &obj_hash[i].lock irq_context: softirq (&rxnet->peer_keepalive_timer) irq_context: softirq (&rxnet->peer_keepalive_timer) rcu_read_lock &pool->lock/1 irq_context: softirq (&rxnet->peer_keepalive_timer) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq (&rxnet->peer_keepalive_timer) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)krxrpcd irq_context: 0 (wq_completion)krxrpcd (work_completion)(&rxnet->peer_keepalive_work) irq_context: 0 (wq_completion)krxrpcd (work_completion)(&rxnet->peer_keepalive_work) &rxnet->peer_hash_lock irq_context: 0 (wq_completion)krxrpcd (work_completion)(&rxnet->peer_keepalive_work) &obj_hash[i].lock irq_context: 0 (wq_completion)krxrpcd (work_completion)(&rxnet->peer_keepalive_work) &base->lock irq_context: 0 (wq_completion)krxrpcd (work_completion)(&rxnet->peer_keepalive_work) &base->lock &obj_hash[i].lock irq_context: 0 init_user_ns.keyring_sem irq_context: 0 init_user_ns.keyring_sem key_user_lock irq_context: 0 init_user_ns.keyring_sem root_key_user.lock irq_context: 0 init_user_ns.keyring_sem fs_reclaim irq_context: 0 init_user_ns.keyring_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 init_user_ns.keyring_sem pool_lock#2 irq_context: 0 init_user_ns.keyring_sem crngs.lock irq_context: 0 init_user_ns.keyring_sem key_serial_lock irq_context: 0 init_user_ns.keyring_sem key_construction_mutex irq_context: 0 init_user_ns.keyring_sem &type->lock_class irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock fs_reclaim irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock pool_lock#2 irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock root_key_user.lock irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock key_construction_mutex irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock key_construction_mutex keyring_name_lock irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock key_construction_mutex &obj_hash[i].lock irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock key_construction_mutex pool_lock#2 irq_context: 0 init_user_ns.keyring_sem keyring_serialise_link_lock irq_context: 0 init_user_ns.keyring_sem key_construction_mutex keyring_name_lock irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock &obj_hash[i].lock irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock &pcp->lock &zone->lock irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock &zone->lock irq_context: 0 init_user_ns.keyring_sem &type->lock_class 
keyring_serialise_link_lock &____s->seqcount irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock rcu_read_lock pool_lock#2 irq_context: 0 template_list irq_context: 0 idr_lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem &pcp->lock &zone->lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem &zone->lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem rcu_read_lock pool_lock#2 irq_context: 0 key_types_sem &type->lock_class &c->lock irq_context: 0 key_types_sem &type->lock_class &____s->seqcount irq_context: 0 ima_extend_list_mutex irq_context: 0 ima_extend_list_mutex fs_reclaim irq_context: 0 ima_extend_list_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 ima_extend_list_mutex pool_lock#2 irq_context: softirq &(&ipvs->defense_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pci_bus_sem irq_context: 0 clk_debug_lock irq_context: 0 (wq_completion)events_unbound deferred_probe_work irq_context: 0 (wq_completion)events_unbound deferred_probe_work deferred_probe_mutex irq_context: 0 deferred_probe_work irq_context: 0 dpm_list_mtx (console_sem).lock irq_context: 0 dpm_list_mtx console_lock console_srcu console_owner_lock irq_context: 0 dpm_list_mtx console_lock console_srcu console_owner irq_context: 0 dpm_list_mtx console_lock console_srcu console_owner &port_lock_key irq_context: 0 dpm_list_mtx console_lock console_srcu console_owner console_owner_lock irq_context: 0 console_mutex &root->kernfs_rwsem irq_context: 0 console_mutex kernfs_notify_lock irq_context: 0 console_mutex kernfs_notify_lock rcu_read_lock &pool->lock irq_context: 0 console_mutex kernfs_notify_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 console_mutex kernfs_notify_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 console_mutex kernfs_notify_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 console_mutex kernfs_notify_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 console_mutex kernfs_notify_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 console_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem irq_context: 0 k-sk_lock-AF_INET irq_context: 0 k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 k-sk_lock-AF_INET &table->hash[i].lock irq_context: 0 k-sk_lock-AF_INET &table->hash[i].lock k-clock-AF_INET irq_context: 0 k-sk_lock-AF_INET &table->hash[i].lock &table->hash2[i].lock irq_context: 0 k-slock-AF_INET irq_context: 0 k-sk_lock-AF_INET6 irq_context: 0 k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 k-slock-AF_INET6 irq_context: 0 k-sk_lock-AF_INET6 &table->hash[i].lock irq_context: 0 k-sk_lock-AF_INET6 &table->hash[i].lock k-clock-AF_INET6 irq_context: 0 k-sk_lock-AF_INET6 &table->hash[i].lock &table->hash2[i].lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &c->lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &pcp->lock &zone->lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &zone->lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &____s->seqcount irq_context: 0 reg_requests_lock irq_context: 0 (wq_completion)events reg_work irq_context: 0 system_transition_mutex/1 irq_context: 0 (wq_completion)events reg_work rtnl_mutex irq_context: 0 &wq->mutex &pool->lock/1 
irq_context: 0 (wq_completion)events reg_work rtnl_mutex reg_requests_lock irq_context: 0 &wq->mutex &x->wait#10 irq_context: 0 (wq_completion)events reg_work rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)events reg_work rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &pool->lock/1 rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex pool_lock#2 irq_context: 0 &pool->lock/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex &obj_hash[i].lock irq_context: 0 &pool->lock/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events reg_work rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)events reg_work rtnl_mutex reg_pending_beacons_lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex &rdev->wiphy.mtx irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &obj_hash[i].lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex &rdev->wiphy.mtx rcu_node_0 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &base->lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex &rdev->wiphy.mtx &rcu_state.expedited_wq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex &rdev->wiphy.mtx &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &pool->lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex &rdev->wiphy.mtx &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &rq->__lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex &rdev->wiphy.mtx &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events reg_work rtnl_mutex &rdev->wiphy.mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) (&timer.timer) irq_context: 0 (wq_completion)events reg_work rtnl_mutex &rdev->wiphy.mtx reg_requests_lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex &base->lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 acpi_gpio_deferred_req_irqs_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &fw_cache.lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &fw_cache.lock pool_lock#2 irq_context: 0 
(wq_completion)events (work_completion)(&fw_work->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) async_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) init_task.alloc_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) init_task.alloc_lock init_fs.lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) init_task.alloc_lock init_fs.lock &dentry->d_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rcu_read_lock &____s->seqcount#4 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &sb->s_type->i_mutex_key irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &sb->s_type->i_mutex_key fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &sb->s_type->i_mutex_key fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &sb->s_type->i_mutex_key pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &sb->s_type->i_mutex_key &dentry->d_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &sb->s_type->i_mutex_key rcu_read_lock rename_lock.seqcount irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &sb->s_type->i_mutex_key &dentry->d_lock &wq irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &dentry->d_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &base->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &c->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &sb->s_type->i_mutex_key &c->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &sb->s_type->i_mutex_key &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &sb->s_type->i_mutex_key &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &sb->s_type->i_mutex_key &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &sb->s_type->i_mutex_key &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) (console_sem).lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) console_owner_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) console_owner irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) console_lock console_srcu console_owner console_owner_lock irq_context: softirq 
fs/file_table.c:368 irq_context: softirq fs/file_table.c:368 rcu_read_lock &pool->lock irq_context: softirq fs/file_table.c:368 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem usermodehelper_disabled_waitq.lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &x->wait#9 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &k->list_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex &k->list_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem lock kernfs_idr_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &root->kernfs_rwsem irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem bus_type_sem irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem sysfs_symlink_target_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &root->kernfs_rwsem irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &c->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &dev->power.lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem dpm_list_mtx irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &k->k_lock 
irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem subsys mutex#79 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem subsys mutex#79 &k->k_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem fw_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem running_helpers_waitq.lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &x->wait#23 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &base->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (delayed_fput_work).work irq_context: 0 (wq_completion)events (delayed_fput_work).work &obj_hash[i].lock irq_context: 0 (wq_completion)events (delayed_fput_work).work pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_lock_key#2 irq_context: 0 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 tomoyo_ss &obj_hash[i].lock irq_context: 0 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 tomoyo_ss tomoyo_log_lock irq_context: 0 tomoyo_ss tomoyo_log_wait.lock irq_context: 0 tomoyo_ss &c->lock irq_context: 0 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 tomoyo_ss &zone->lock irq_context: 0 tomoyo_ss &____s->seqcount irq_context: 0 cdev_lock irq_context: 0 tty_mutex (console_sem).lock irq_context: 0 tty_mutex console_lock irq_context: 0 tty_mutex fs_reclaim irq_context: 0 tty_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 tty_mutex pool_lock#2 irq_context: 0 tty_mutex tty_ldiscs_lock irq_context: 0 tty_mutex &obj_hash[i].lock 
irq_context: 0 tty_mutex &k->list_lock irq_context: 0 tty_mutex &k->k_lock irq_context: 0 tty_mutex &tty->legacy_mutex irq_context: 0 tty_mutex &tty->legacy_mutex &tty->read_wait irq_context: 0 tty_mutex &tty->legacy_mutex &tty->write_wait irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem fs_reclaim irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem pool_lock#2 irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem free_vmap_area_lock irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem vmap_area_lock irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem &pcp->lock &zone->lock irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem &zone->lock irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem &____s->seqcount irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem init_mm.page_table_lock irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem &tty->write_wait irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem &tty->read_wait irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem &tty->termios_rwsem irq_context: 0 &tty->legacy_mutex irq_context: 0 &tty->legacy_mutex &tty->files_lock irq_context: 0 &tty->legacy_mutex &port->lock irq_context: 0 &tty->legacy_mutex &port->mutex irq_context: 0 &tty->legacy_mutex &port->mutex fs_reclaim irq_context: 0 &tty->legacy_mutex &port->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &tty->legacy_mutex &port->mutex &pcp->lock &zone->lock irq_context: 0 &tty->legacy_mutex &port->mutex &zone->lock irq_context: 0 &tty->legacy_mutex &port->mutex &____s->seqcount irq_context: 0 &tty->legacy_mutex &port->mutex pool_lock#2 irq_context: 0 &tty->legacy_mutex &port->mutex &port_lock_key irq_context: 0 &tty->legacy_mutex &port->mutex hash_mutex irq_context: 0 &tty->legacy_mutex &port->mutex hash_mutex fs_reclaim irq_context: 0 &tty->legacy_mutex &port->mutex hash_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &tty->legacy_mutex &port->mutex hash_mutex pool_lock#2 irq_context: 0 &tty->legacy_mutex &port->mutex &i->lock irq_context: 0 &tty->legacy_mutex &port->mutex &desc->request_mutex irq_context: 0 &tty->legacy_mutex &port->mutex &desc->request_mutex &irq_desc_lock_class irq_context: 0 &tty->legacy_mutex &port->mutex &desc->request_mutex &irq_desc_lock_class vector_lock irq_context: 0 &tty->legacy_mutex &port->mutex &desc->request_mutex &irq_desc_lock_class ioapic_lock irq_context: 0 &tty->legacy_mutex &port->mutex &desc->request_mutex &irq_desc_lock_class mask_lock irq_context: 0 &tty->legacy_mutex &port->mutex &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock irq_context: 0 &tty->legacy_mutex &port->mutex &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock vector_lock irq_context: 0 &tty->legacy_mutex &port->mutex &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock ioapic_lock irq_context: 0 &tty->legacy_mutex &port->mutex &desc->request_mutex &irq_desc_lock_class ioapic_lock i8259A_lock irq_context: 0 &tty->legacy_mutex &port->mutex register_lock irq_context: 0 &tty->legacy_mutex &port->mutex &irq_desc_lock_class irq_context: 0 &tty->legacy_mutex &port->mutex proc_subdir_lock irq_context: 0 &tty->legacy_mutex &port->mutex proc_inum_ida.xa_lock irq_context: 0 &tty->legacy_mutex &port->mutex proc_subdir_lock irq_context: hardirq &i->lock 
irq_context: 0 &tty->legacy_mutex &port_lock_key irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex &rq->__lock irq_context: 0 detected_devices_mutex irq_context: 0 sb_writers#2 irq_context: 0 sb_writers#2 mount_lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 rename_lock.seqcount irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 fs_reclaim irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 pool_lock#2 irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 &dentry->d_lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 &obj_hash[i].lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_log_lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_log_wait.lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss &c->lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss &zone->lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 &sb->s_type->i_lock_key#2 irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 &s->s_inode_list_lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tk_core.seq.seqcount irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 &sb->s_type->i_lock_key#2 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key &dentry->d_lock &wq#2 irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 &sb->s_type->i_mutex_key irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 &sb->s_type->i_mutex_key tk_core.seq.seqcount irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 &sb->s_type->i_mutex_key rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 &sb->s_type->i_mutex_key &dentry->d_lock irq_context: 0 tomoyo_ss file_systems_lock irq_context: 0 tomoyo_ss fs_reclaim irq_context: 0 tomoyo_ss fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 tomoyo_ss rcu_read_lock 
init_fs.seq.seqcount irq_context: 0 &disk->open_mutex bdev_lock irq_context: 0 &bdev->bd_fsfreeze_mutex irq_context: 0 &bdev->bd_fsfreeze_mutex sb_lock irq_context: 0 &bdev->bd_fsfreeze_mutex fs_reclaim irq_context: 0 &bdev->bd_fsfreeze_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &bdev->bd_fsfreeze_mutex &c->lock irq_context: 0 &bdev->bd_fsfreeze_mutex &____s->seqcount irq_context: 0 &bdev->bd_fsfreeze_mutex pool_lock#2 irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#25/1 irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#25/1 fs_reclaim irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#25/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#25/1 pool_lock#2 irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#25/1 pcpu_alloc_mutex irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#25/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#25/1 shrinker_rwsem irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#25/1 list_lrus_mutex irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#25/1 sb_lock irq_context: 0 &type->s_umount_key#25/1 irq_context: 0 &type->s_umount_key#25/1 fs_reclaim irq_context: 0 &type->s_umount_key#25/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#25/1 &c->lock irq_context: 0 &type->s_umount_key#25/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#25/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#25/1 &____s->seqcount irq_context: 0 &type->s_umount_key#25/1 pool_lock#2 irq_context: 0 &type->s_umount_key#25/1 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#25/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#25/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#25/1 &wq->mutex irq_context: 0 &type->s_umount_key#25/1 &wq->mutex &pool->lock irq_context: 0 &type->s_umount_key#25/1 kthread_create_lock irq_context: 0 &type->s_umount_key#25/1 &p->pi_lock irq_context: 0 &type->s_umount_key#25/1 &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#25/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#25/1 &rq->__lock irq_context: 0 &type->s_umount_key#25/1 &x->wait irq_context: 0 &type->s_umount_key#25/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#25/1 wq_pool_mutex irq_context: 0 &type->s_umount_key#25/1 wq_pool_mutex &wq->mutex irq_context: 0 &type->s_umount_key#25/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#25/1 &xa->xa_lock#7 irq_context: 0 &type->s_umount_key#25/1 &xa->xa_lock#7 pool_lock#2 irq_context: 0 &type->s_umount_key#25/1 lock#4 irq_context: 0 &type->s_umount_key#25/1 &mapping->private_lock irq_context: 0 &type->s_umount_key#25/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#25/1 &dd->lock irq_context: 0 &type->s_umount_key#25/1 rcu_read_lock &pool->lock irq_context: 0 &type->s_umount_key#25/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#25/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &type->s_umount_key#25/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->s_umount_key#25/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#25/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#25/1 bit_wait_table + i 
irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock &base->lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock &base->lock &obj_hash[i].lock irq_context: softirq bit_wait_table + i irq_context: softirq bit_wait_table + i &p->pi_lock irq_context: softirq bit_wait_table + i &p->pi_lock &rq->__lock irq_context: softirq bit_wait_table + i &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq bit_wait_table + i &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &type->s_umount_key#25/1 &zone->lock irq_context: 0 &type->i_mutex_dir_key#3 irq_context: 0 &type->s_umount_key#25/1 &wq->mutex &x->wait#10 irq_context: 0 &type->s_umount_key#25/1 wq_mayday_lock irq_context: 0 &type->s_umount_key#25/1 &cfs_rq->removed.lock irq_context: 0 &type->s_umount_key#25/1 wq_pool_mutex &wq->mutex &pool->lock irq_context: 0 &type->s_umount_key#25/1 rcu_state.exp_mutex rcu_node_0 irq_context: 0 &type->s_umount_key#25/1 rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &type->s_umount_key#25/1 rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &type->s_umount_key#25/1 rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#25/1 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->s_umount_key#25/1 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#25/1 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#25/1 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &type->s_umount_key#25/1 rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 &type->s_umount_key#25/1 rcu_state.exp_mutex &rq->__lock irq_context: 0 &type->s_umount_key#25/1 &sbi->old_work_lock irq_context: 0 &type->s_umount_key#25/1 (work_completion)(&(&sbi->old_work)->work) irq_context: 0 &type->s_umount_key#25/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#25/1 sb_lock irq_context: 0 &xa->xa_lock#3 irq_context: 0 sb_lock &obj_hash[i].lock irq_context: 0 sb_lock pool_lock#2 irq_context: 0 tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#26/1 irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#26/1 fs_reclaim irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#26/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#26/1 pool_lock#2 irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#26/1 pcpu_alloc_mutex irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#26/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#26/1 shrinker_rwsem irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#26/1 list_lrus_mutex irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#26/1 sb_lock irq_context: 0 &type->s_umount_key#26/1 irq_context: 0 &type->s_umount_key#26/1 fs_reclaim irq_context: 0 &type->s_umount_key#26/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#26/1 pool_lock#2 irq_context: 0 &type->s_umount_key#26/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#26/1 &____s->seqcount irq_context: 0 &type->s_umount_key#26/1 &xa->xa_lock#7 irq_context: 0 &type->s_umount_key#26/1 lock#4 irq_context: 0 &type->s_umount_key#26/1 
&mapping->private_lock irq_context: 0 &type->s_umount_key#26/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#26/1 &dd->lock irq_context: 0 &type->s_umount_key#26/1 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#26/1 rcu_read_lock &pool->lock irq_context: 0 &type->s_umount_key#26/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#26/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &type->s_umount_key#26/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->s_umount_key#26/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#26/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#26/1 bit_wait_table + i irq_context: 0 &type->s_umount_key#26/1 &rq->__lock irq_context: 0 &type->s_umount_key#26/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#26/1 &sb->s_type->i_lock_key#3 irq_context: 0 &type->s_umount_key#26/1 &sb->s_type->i_lock_key#3 &xa->xa_lock#7 irq_context: 0 &type->s_umount_key#26/1 lock#4 &lruvec->lru_lock irq_context: 0 &type->s_umount_key#26/1 lock#5 irq_context: 0 &type->s_umount_key#26/1 &lruvec->lru_lock irq_context: 0 &type->s_umount_key#26/1 rcu_read_lock pool_lock#2 irq_context: 0 &type->s_umount_key#26/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#26/1 &zone->lock irq_context: 0 &type->s_umount_key#26/1 crypto_alg_sem irq_context: 0 &type->s_umount_key#26/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#26/1 sb_lock irq_context: 0 &bdev->bd_fsfreeze_mutex &pcp->lock &zone->lock irq_context: 0 &bdev->bd_fsfreeze_mutex &zone->lock irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#27/1 irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#27/1 fs_reclaim irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#27/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#27/1 pcpu_alloc_mutex irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#27/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#27/1 shrinker_rwsem irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#27/1 list_lrus_mutex irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#27/1 sb_lock irq_context: 0 &type->s_umount_key#27/1 irq_context: 0 &type->s_umount_key#27/1 fs_reclaim irq_context: 0 &type->s_umount_key#27/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#27/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#27/1 &____s->seqcount irq_context: 0 &type->s_umount_key#27/1 &xa->xa_lock#7 irq_context: 0 &type->s_umount_key#27/1 lock#4 irq_context: 0 &type->s_umount_key#27/1 &mapping->private_lock irq_context: 0 &type->s_umount_key#27/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#27/1 &dd->lock irq_context: 0 &type->s_umount_key#27/1 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#27/1 rcu_read_lock &pool->lock irq_context: 0 &type->s_umount_key#27/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#27/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->s_umount_key#27/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#27/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#27/1 bit_wait_table + i irq_context: 0 &type->s_umount_key#27/1 &rq->__lock 
irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &type->s_umount_key#27/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#27/1 &sb->s_type->i_lock_key#3 irq_context: 0 &type->s_umount_key#27/1 &sb->s_type->i_lock_key#3 &xa->xa_lock#7 irq_context: 0 &type->s_umount_key#27/1 lock#4 &lruvec->lru_lock irq_context: 0 &type->s_umount_key#27/1 lock#5 irq_context: 0 &type->s_umount_key#27/1 &lruvec->lru_lock irq_context: 0 &type->s_umount_key#27/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#27/1 &zone->lock irq_context: 0 &type->s_umount_key#28/1 console_lock console_srcu console_owner irq_context: 0 &type->s_umount_key#28/1 console_lock console_srcu console_owner &port_lock_key irq_context: 0 &type->s_umount_key#28/1 console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events (work_completion)(&s->destroy_work) irq_context: 0 (wq_completion)events (work_completion)(&s->destroy_work) &rsp->gp_wait irq_context: 0 (wq_completion)events (work_completion)(&s->destroy_work) pcpu_lock irq_context: 0 (wq_completion)events (work_completion)(&s->destroy_work) &obj_hash[i].lock irq_context: 0 &type->s_umount_key#28/1 &dentry->d_lock irq_context: 0 &sb->s_type->i_lock_key#22 irq_context: 0 &type->i_mutex_dir_key#3 fs_reclaim irq_context: 0 &type->i_mutex_dir_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#3 pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_es_lock irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#3 &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#3 &xa->xa_lock#7 irq_context: 0 &type->i_mutex_dir_key#3 &xa->xa_lock#7 pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 lock#4 irq_context: 0 &type->i_mutex_dir_key#3 &c->lock irq_context: 0 &type->i_mutex_dir_key#3 &pcp->lock &zone->lock irq_context: 0 &type->i_mutex_dir_key#3 &zone->lock irq_context: 0 &type->i_mutex_dir_key#3 &mapping->private_lock irq_context: 0 &type->i_mutex_dir_key#3 tk_core.seq.seqcount irq_context: 0 &type->i_mutex_dir_key#3 &dd->lock irq_context: 0 &type->i_mutex_dir_key#3 &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#3 bit_wait_table + i irq_context: 0 &type->i_mutex_dir_key#3 &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#3 inode_hash_lock irq_context: 0 &type->i_mutex_dir_key#3 inode_hash_lock &sb->s_type->i_lock_key#22 irq_context: 0 &type->i_mutex_dir_key#3 inode_hash_lock &s->s_inode_list_lock irq_context: 0 &type->i_mutex_dir_key#3 &journal->j_state_lock irq_context: 0 &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#22 irq_context: 0 &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#22 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#22 &dentry->d_lock &wq irq_context: 0 &type->i_mutex_dir_key#3 irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem rcu_read_lock mount_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem fs_reclaim irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem rename_lock irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem rename_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem mount_lock irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem mount_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem mount_lock mount_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem mount_lock mount_lock.seqcount &new_ns->poll irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem mount_lock mount_lock.seqcount &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem mount_lock mount_lock.seqcount rcu_read_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem mount_lock mount_lock.seqcount &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem mount_lock mount_lock.seqcount pool_lock#2 irq_context: 0 tomoyo_ss rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key namespace_sem &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem rcu_read_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem &obj_hash[i].lock irq_context: 0 vmap_purge_lock free_vmap_area_lock irq_context: 0 vmap_purge_lock free_vmap_area_lock &obj_hash[i].lock irq_context: 0 vmap_purge_lock free_vmap_area_lock pool_lock#2 irq_context: 0 vmap_purge_lock free_vmap_area_lock init_mm.page_table_lock irq_context: 0 vmap_purge_lock free_vmap_area_lock init_mm.page_table_lock rcu_read_lock pool_lock#2 irq_context: 0 vmap_purge_lock free_vmap_area_lock init_mm.page_table_lock &obj_hash[i].lock irq_context: 0 rcu_state.barrier_mutex irq_context: 0 rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 rcu_state.barrier_mutex &x->wait#24 irq_context: 0 (init_mm).mmap_lock irq_context: softirq (&cb->timer) irq_context: softirq (&cb->timer) &obj_hash[i].lock irq_context: softirq (&cb->timer) &base->lock irq_context: softirq (&cb->timer) &base->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#29/1 irq_context: 0 &type->s_umount_key#29/1 fs_reclaim irq_context: 0 &type->s_umount_key#29/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#29/1 pool_lock#2 irq_context: 0 &type->s_umount_key#29/1 
pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#29/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#29/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#29/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#29/1 sb_lock irq_context: 0 &type->s_umount_key#29/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#29/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#29/1 &zone->lock irq_context: 0 &type->s_umount_key#29/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#29/1 &____s->seqcount irq_context: 0 &type->s_umount_key#29/1 &c->lock irq_context: 0 &type->s_umount_key#29/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#29/1 &sb->s_type->i_lock_key#23 irq_context: 0 &type->s_umount_key#29/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#29/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#29/1 &sb->s_type->i_lock_key#23 &dentry->d_lock irq_context: 0 &type->s_umount_key#29/1 &sb->s_type->i_mutex_key#9 irq_context: 0 &type->s_umount_key#29/1 &sb->s_type->i_mutex_key#9 fs_reclaim irq_context: 0 &type->s_umount_key#29/1 &sb->s_type->i_mutex_key#9 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#29/1 &sb->s_type->i_mutex_key#9 pool_lock#2 irq_context: 0 &type->s_umount_key#29/1 &sb->s_type->i_mutex_key#9 &dentry->d_lock irq_context: 0 &type->s_umount_key#29/1 &sb->s_type->i_mutex_key#9 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#29/1 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 irq_context: 0 &type->s_umount_key#29/1 &sb->s_type->i_mutex_key#9 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#29/1 &sb->s_type->i_mutex_key#9 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#29/1 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock irq_context: 0 &type->s_umount_key#29/1 &dentry->d_lock irq_context: 0 rcu_read_lock &sb->s_type->i_lock_key#23 irq_context: 0 &sb->s_type->i_mutex_key#9 irq_context: 0 &sb->s_type->i_mutex_key#9 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#9 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#9 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#9 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 proc_subdir_lock irq_context: 0 &sb->s_type->i_mutex_key#9 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 irq_context: 0 &sb->s_type->i_mutex_key#9 &s->s_inode_list_lock irq_context: 0 &sb->s_type->i_mutex_key#9 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock &wq irq_context: 0 &sb->s_type->i_mutex_key#9 sysctl_lock irq_context: 0 sb_writers#3 irq_context: 0 sb_writers#3 mount_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rename_lock.seqcount irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 fs_reclaim irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 pool_lock#2 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &dentry->d_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#3 
&sb->s_type->i_mutex_key#9 sysctl_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &s->s_inode_list_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tk_core.seq.seqcount irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock &wq#2 irq_context: 0 tomoyo_ss rcu_read_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 irq_context: 0 sb_writers#3 sysctl_lock irq_context: 0 sb_writers#3 fs_reclaim irq_context: 0 sb_writers#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#3 pool_lock#2 irq_context: 0 sb_writers#3 &obj_hash[i].lock irq_context: 0 sb_writers#3 &h->resize_lock irq_context: 0 sb_writers#3 &h->resize_lock free_hpage_work irq_context: 0 sb_writers#3 &h->resize_lock hugetlb_lock irq_context: 0 sb_writers#3 &h->resize_lock fs_reclaim irq_context: 0 sb_writers#3 &h->resize_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#3 &h->resize_lock &pcp->lock &zone->lock irq_context: 0 sb_writers#3 &h->resize_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#3 &h->resize_lock &____s->seqcount irq_context: 0 sb_writers#3 &h->resize_lock pool_lock#2 irq_context: 0 sb_writers#3 hugetlb_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &dentry->d_lock &wq#2 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &obj_hash[i].lock irq_context: 0 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex init_fs.lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock init_fs.seq.seqcount irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 fs_reclaim irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_es_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 inode_hash_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 inode_hash_lock &sb->s_type->i_lock_key#22 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 inode_hash_lock &s->s_inode_list_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &xa->xa_lock#7 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &xa->xa_lock#7 pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 lock#4 irq_context: 0 &sig->cred_guard_mutex 
&type->i_mutex_dir_key#3 &mapping->private_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &zone->lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &dd->lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 lock#4 &lruvec->lru_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &c->lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock &dd->lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 bit_wait_table + i irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &rq->__lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &journal->j_state_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#22 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#22 &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#22 &dentry->d_lock &wq irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_lock_key#22 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rename_lock.seqcount irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#22 &dentry->d_lock &wq#2 irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 irq_context: 0 &sig->cred_guard_mutex &p->pi_lock irq_context: 0 &sig->cred_guard_mutex 
aa_buffers_lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock mount_lock.seqcount irq_context: 0 &sig->cred_guard_mutex rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &xa->xa_lock#7 irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock lock#4 irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &xa->xa_lock#7 pool_lock#2 irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock lock#4 &lruvec->lru_lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_es_lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem pool_lock#2 irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &dd->lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock rcu_read_lock &dd->lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &sig->cred_guard_mutex &folio_wait_table[i] irq_context: softirq &(&ovs_net->masks_rebalance)->timer irq_context: softirq &(&ovs_net->masks_rebalance)->timer rcu_read_lock &pool->lock irq_context: softirq &(&ovs_net->masks_rebalance)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&ovs_net->masks_rebalance)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&ovs_net->masks_rebalance)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&ovs_net->masks_rebalance)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) &base->lock irq_context: 0 
(wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss irq_context: 0 &sig->cred_guard_mutex tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &c->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &zone->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &____s->seqcount irq_context: 0 &sig->cred_guard_mutex tomoyo_ss pool_lock#2 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss fs_reclaim irq_context: 0 &sig->cred_guard_mutex tomoyo_ss fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock init_fs.seq.seqcount irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &mm->mmap_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_log_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_log_wait.lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 &zone->lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 &c->lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 integrity_iint_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex irq_context: 0 &sig->cred_guard_mutex &iint->mutex &ei->xattr_sem irq_context: 0 &sig->cred_guard_mutex &iint->mutex fs_reclaim irq_context: 0 &sig->cred_guard_mutex &iint->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &iint->mutex pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &iint->mutex &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &iint->mutex rcu_read_lock mount_lock.seqcount irq_context: 0 &sig->cred_guard_mutex &iint->mutex rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 &sig->cred_guard_mutex &iint->mutex ima_extend_list_mutex irq_context: 0 &sig->cred_guard_mutex &iint->mutex ima_extend_list_mutex fs_reclaim irq_context: 0 &sig->cred_guard_mutex &iint->mutex ima_extend_list_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &iint->mutex ima_extend_list_mutex pool_lock#2 irq_context: 0 &sig->cred_guard_mutex binfmt_lock irq_context: 0 &sig->cred_guard_mutex entries_lock irq_context: 0 
&sig->cred_guard_mutex &dentry->d_lock &lru->node[i].lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: softirq (&cb->timer) tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex &ei->xattr_sem irq_context: 0 &sig->cred_guard_mutex &tsk->futex_exit_mutex irq_context: 0 &sig->cred_guard_mutex &tsk->futex_exit_mutex &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &p->alloc_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &p->alloc_lock &memcg->mm_list.lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &sighand->siglock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &newf->file_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock batched_entropy_u64.lock irq_context: 0 batched_entropy_u16.lock irq_context: 0 batched_entropy_u16.lock crngs.lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem irq_context: 0 &mm->mmap_lock &vma->vm_lock->lock irq_context: 0 &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem pool_lock#2 irq_context: 0 &mm->mmap_lock &mm->page_table_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page) irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 ptlock_ptr(page)#2/1 irq_context: 0 &mm->mmap_lock lock#4 irq_context: 0 &mm->mmap_lock lock#4 &lruvec->lru_lock irq_context: 0 &mm->mmap_lock lock#5 irq_context: 0 &mm->mmap_lock rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &mm->page_table_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &mm->page_table_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &mm->page_table_lock pool_lock#2 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 irq_context: 0 &sb->s_type->i_mutex_key#8 integrity_iint_lock irq_context: 0 &iint->mutex irq_context: 0 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 integrity_iint_lock irq_context: 0 &iint->mutex &ei->xattr_sem irq_context: 0 &iint->mutex fs_reclaim irq_context: 0 &iint->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &iint->mutex pool_lock#2 irq_context: 0 &iint->mutex mapping.invalidate_lock irq_context: 0 &iint->mutex mapping.invalidate_lock mmu_notifier_invalidate_range_start irq_context: 0 &iint->mutex mapping.invalidate_lock &pcp->lock &zone->lock irq_context: 0 &iint->mutex mapping.invalidate_lock &____s->seqcount irq_context: 0 &iint->mutex mapping.invalidate_lock pool_lock#2 irq_context: 0 &iint->mutex mapping.invalidate_lock &xa->xa_lock#7 irq_context: 0 &iint->mutex mapping.invalidate_lock lock#4 irq_context: 0 &iint->mutex mapping.invalidate_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_es_lock irq_context: 0 &iint->mutex mapping.invalidate_lock &zone->lock irq_context: 0 &iint->mutex mapping.invalidate_lock &c->lock irq_context: 0 &iint->mutex 
mapping.invalidate_lock tk_core.seq.seqcount irq_context: 0 &iint->mutex mapping.invalidate_lock &dd->lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &dd->lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock tk_core.seq.seqcount irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &iint->mutex mapping.invalidate_lock lock#4 &lruvec->lru_lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &iint->mutex &folio_wait_table[i] irq_context: 0 &iint->mutex &rq->__lock irq_context: 0 &iint->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: hardirq &rq->__lock pool_lock#2 irq_context: 0 &iint->mutex mapping.invalidate_lock batched_entropy_u8.lock irq_context: 0 &iint->mutex mapping.invalidate_lock kfence_freelist_lock irq_context: 0 &iint->mutex &obj_hash[i].lock irq_context: 0 &iint->mutex mmu_notifier_invalidate_range_start irq_context: 0 &iint->mutex rcu_read_lock mount_lock.seqcount irq_context: 0 &iint->mutex rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 &iint->mutex &c->lock irq_context: 0 &iint->mutex &pcp->lock &zone->lock irq_context: 0 &iint->mutex &zone->lock irq_context: 0 &iint->mutex &____s->seqcount irq_context: 0 &iint->mutex ima_extend_list_mutex irq_context: 0 &iint->mutex ima_extend_list_mutex fs_reclaim irq_context: 0 &iint->mutex ima_extend_list_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &iint->mutex ima_extend_list_mutex pool_lock#2 irq_context: 0 binfmt_lock irq_context: 0 &dentry->d_lock &lru->node[i].lock irq_context: 0 &fsnotify_mark_srcu irq_context: 0 &type->s_umount_key#30 irq_context: 0 &type->s_umount_key#30 shrinker_rwsem irq_context: 0 &type->s_umount_key#30 rcu_read_lock &dentry->d_lock irq_context: 0 &type->s_umount_key#30 &dentry->d_lock irq_context: 0 &type->s_umount_key#30 &dentry->d_lock &lru->node[i].lock irq_context: 0 &type->s_umount_key#30 rename_lock.seqcount irq_context: 0 &type->s_umount_key#30 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 &type->s_umount_key#30 &dentry->d_lock/1 irq_context: 0 &type->s_umount_key#30 &dentry->d_lock &dentry->d_lock/1 &lru->node[i].lock irq_context: 0 &type->s_umount_key#30 &sb->s_type->i_lock_key#23 irq_context: 0 &type->s_umount_key#30 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#30 &xa->xa_lock#7 irq_context: 0 &type->s_umount_key#30 sysctl_lock irq_context: 0 &type->s_umount_key#30 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#30 pool_lock#2 irq_context: 0 &type->s_umount_key#30 &fsnotify_mark_srcu irq_context: 0 &type->s_umount_key#30 &obj_hash[i].lock pool_lock irq_context: 0 &type->s_umount_key#30 sb_lock irq_context: 0 unnamed_dev_ida.xa_lock irq_context: 0 &xa->xa_lock#7 irq_context: 0 prog_idr_lock irq_context: 0 prog_idr_lock &obj_hash[i].lock irq_context: 0 prog_idr_lock pool_lock#2 irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 map_idr_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 btf_idr_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex 
rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 btf_idr_lock &obj_hash[i].lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 btf_idr_lock pool_lock#2 irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) vmap_area_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) purge_vmap_area_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) pool_lock#2 irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 key irq_context: 0 &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#3 &dentry->d_lock &wq irq_context: 0 &vma->vm_lock->lock fs_reclaim irq_context: 0 &vma->vm_lock->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &vma->vm_lock->lock &pcp->lock &zone->lock irq_context: 0 &vma->vm_lock->lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &vma->vm_lock->lock &____s->seqcount irq_context: 0 &vma->vm_lock->lock pool_lock#2 irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 lock#4 irq_context: 0 &type->i_mutex_dir_key#3 rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#3 &dentry->d_lock &wq#2 irq_context: 0 &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#22 &dentry->d_lock &wq#2 irq_context: 0 &iint->mutex mapping.invalidate_lock &xa->xa_lock#7 pool_lock#2 irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem pool_lock#2 irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock pool_lock#2 irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_read_lock &pool->lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_read_lock &pool->lock &p->pi_lock 
irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&aux->work) irq_context: 0 (wq_completion)events (work_completion)(&aux->work) map_idr_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) map_idr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) map_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex text_mutex irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pcpu_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) vmap_area_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) purge_vmap_area_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &obj_hash[i].lock pool_lock irq_context: 0 &iint->mutex mapping.invalidate_lock &xa->xa_lock#7 batched_entropy_u8.lock irq_context: 0 &iint->mutex mapping.invalidate_lock &xa->xa_lock#7 kfence_freelist_lock irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_es_lock key#2 irq_context: 0 &iint->mutex mapping.invalidate_lock &xa->xa_lock#7 &c->lock irq_context: 0 &iint->mutex mapping.invalidate_lock &xa->xa_lock#7 &____s->seqcount irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page) irq_context: 0 &type->i_mutex_dir_key#3 lock#4 &lruvec->lru_lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &dd->lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock tk_core.seq.seqcount irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &____s->seqcount irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock pool_lock#2 irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &c->lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &____s->seqcount irq_context: 0 &iint->mutex ima_extend_list_mutex &c->lock irq_context: 0 &iint->mutex ima_extend_list_mutex &____s->seqcount irq_context: 0 &mm->mmap_lock &obj_hash[i].lock pool_lock irq_context: 0 &mm->mmap_lock 
&mapping->i_mmap_rwsem &anon_vma->rwsem irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem pool_lock#2 irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 key irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 lock#4 &lruvec->lru_lock irq_context: 0 &vma->vm_lock->lock mmu_notifier_invalidate_range_start irq_context: 0 &port->mutex irq_context: 0 &tty->ldisc_sem irq_context: 0 &tty->ldisc_sem &tty->termios_rwsem irq_context: 0 &tty->ldisc_sem &mm->mmap_lock irq_context: 0 &tty->ldisc_sem &tty->termios_rwsem irq_context: 0 &tty->ldisc_sem &tty->termios_rwsem &port->mutex irq_context: 0 &tty->ldisc_sem &tty->termios_rwsem &tty->ldisc_sem &tty->write_wait irq_context: 0 &tty->ldisc_sem &tty->termios_rwsem &tty->ldisc_sem &tty->read_wait irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_es_lock key#2 irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 integrity_iint_lock irq_context: 0 &type->s_umount_key#29/1 &sb->s_type->i_mutex_key#9 &____s->seqcount irq_context: 0 &type->s_umount_key#29/1 &sb->s_type->i_mutex_key#9 rcu_read_lock pool_lock#2 irq_context: 0 &type->s_umount_key#29/1 &sb->s_type->i_mutex_key#9 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#9 rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock &wq#2 irq_context: 0 &sb->s_type->i_lock_key#23 irq_context: 0 &p->lock irq_context: 0 &p->lock fs_reclaim irq_context: 0 &p->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock pool_lock#2 irq_context: 0 &p->lock &mm->mmap_lock irq_context: 0 &type->s_umount_key#31/1 irq_context: 0 &type->s_umount_key#31/1 fs_reclaim irq_context: 0 &type->s_umount_key#31/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#31/1 pool_lock#2 irq_context: 0 &type->s_umount_key#31/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#31/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#31/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#31/1 &c->lock irq_context: 0 &type->s_umount_key#31/1 &____s->seqcount irq_context: 0 &type->s_umount_key#31/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#31/1 sb_lock irq_context: 0 &type->s_umount_key#31/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#31/1 &root->kernfs_rwsem irq_context: 0 &type->s_umount_key#31/1 &root->kernfs_rwsem inode_hash_lock irq_context: 0 &type->s_umount_key#31/1 &root->kernfs_rwsem fs_reclaim irq_context: 0 &type->s_umount_key#31/1 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#31/1 &root->kernfs_rwsem pool_lock#2 irq_context: 0 &type->s_umount_key#31/1 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#31/1 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#24 irq_context: 0 &type->s_umount_key#31/1 &root->kernfs_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#31/1 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#31/1 &root->kernfs_rwsem &sb->s_type->i_lock_key#24 irq_context: 0 &type->s_umount_key#31/1 &sb->s_type->i_lock_key#24 irq_context: 0 &type->s_umount_key#31/1 &sb->s_type->i_lock_key#24 &dentry->d_lock irq_context: 0 &type->s_umount_key#31/1 &root->kernfs_supers_rwsem irq_context: 0 &type->s_umount_key#31/1 &dentry->d_lock irq_context: 0 
&root->kernfs_iattr_rwsem irq_context: 0 &type->i_mutex_dir_key#4 irq_context: 0 &type->i_mutex_dir_key#4 fs_reclaim irq_context: 0 &type->i_mutex_dir_key#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#4 pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#4 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#4 rcu_read_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem inode_hash_lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem fs_reclaim irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#24 irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &sb->s_type->i_lock_key#24 irq_context: 0 &type->i_mutex_dir_key#4 &sb->s_type->i_lock_key#24 irq_context: 0 &type->i_mutex_dir_key#4 &sb->s_type->i_lock_key#24 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#4 &sb->s_type->i_lock_key#24 &dentry->d_lock &wq irq_context: 0 &type->i_mutex_dir_key#4 &dentry->d_lock &wq irq_context: 0 &ent->pde_unload_lock irq_context: 0 &p->lock file_systems_lock irq_context: 0 namespace_sem mount_lock mount_lock.seqcount &new_ns->poll irq_context: 0 namespace_sem mount_lock mount_lock.seqcount &dentry->d_lock irq_context: 0 namespace_sem mount_lock mount_lock.seqcount rcu_read_lock &dentry->d_lock irq_context: 0 namespace_sem mount_lock mount_lock.seqcount &obj_hash[i].lock irq_context: 0 namespace_sem mount_lock mount_lock.seqcount pool_lock#2 irq_context: 0 &x->wait#25 irq_context: 0 &mm->mmap_lock resource_lock irq_context: 0 &mm->mmap_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &net->unx.table.locks[i] irq_context: 0 &sb->s_type->i_lock_key#8 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#10 irq_context: 0 &sb->s_type->i_mutex_key#10 &net->unx.table.locks[i] irq_context: 0 &sb->s_type->i_mutex_key#10 &u->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &u->lock clock-AF_UNIX irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_UNIX irq_context: 0 &sb->s_type->i_mutex_key#10 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &dir->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem &c->lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &c->lock 
irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock &____s->seqcount#3 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss quarantine_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex &c->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &p->alloc_lock irq_context: 0 &sig->cred_guard_mutex &p->alloc_lock &x->wait#25 irq_context: 0 &sig->cred_guard_mutex &p->alloc_lock &x->wait#25 &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &p->alloc_lock &x->wait#25 &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &p->alloc_lock &x->wait#25 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->wait_chldexit irq_context: 0 tasklist_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &p->alloc_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &p->alloc_lock &memcg->mm_list.lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &meta->lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem kfence_freelist_lock irq_context: 0 &type->s_umount_key#29/1 &sb->s_type->i_mutex_key#9 &c->lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock irq_context: 0 &mm->mmap_lock &p->alloc_lock irq_context: 0 &mm->mmap_lock lock#4 irq_context: 0 &mm->mmap_lock lock#4 &lruvec->lru_lock irq_context: 0 &mm->mmap_lock lock#5 irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 &____s->seqcount irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 pool_lock#2 irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 key irq_context: 0 &mm->mmap_lock &lruvec->lru_lock irq_context: 0 &memcg->mm_list.lock irq_context: 0 tasklist_lock &sighand->siglock &____s->seqcount irq_context: 0 tasklist_lock &sighand->siglock pool_lock#2 irq_context: 0 tasklist_lock &sighand->siglock &c->lock irq_context: 0 rcu_read_lock &____s->seqcount#5 irq_context: 0 &prev->lock irq_context: 0 &sighand->siglock &(&sig->stats_lock)->lock irq_context: 0 &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 irq_context: 0 sb_writers#4 irq_context: 0 sb_writers#4 mount_lock irq_context: 0 &type->i_mutex_dir_key#3 rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#3 &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 integrity_iint_lock irq_context: 0 &sig->cred_guard_mutex &ei->xattr_sem &mapping->private_lock irq_context: 0 &mm->mmap_lock lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock lock#4 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#9 &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 &c->lock irq_context: 0 sb_writers#3 tk_core.seq.seqcount irq_context: 0 sb_writers#3 &sb->s_type->i_lock_key#23 irq_context: 0 sb_writers#3 &wb->list_lock irq_context: 0 sb_writers#3 &wb->list_lock &sb->s_type->i_lock_key#23 irq_context: 0 &sb->s_type->i_mutex_key#9 &p->alloc_lock irq_context: 0 &sb->s_type->i_mutex_key#9 &pid->lock irq_context: 0 &p->alloc_lock &fs->lock &dentry->d_lock irq_context: 0 &p->lock namespace_sem irq_context: 0 &p->lock namespace_sem &new_ns->ns_lock irq_context: 0 &p->lock namespace_sem rcu_read_lock mount_lock.seqcount irq_context: 0 &p->lock namespace_sem rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 &type->s_umount_key#32 irq_context: 0 &type->s_umount_key#32 &lru->node[i].lock irq_context: 0 &type->s_umount_key#32 &dentry->d_lock irq_context: 0 
&type->s_umount_key#32 &sb->s_type->i_lock_key#22 irq_context: 0 &type->s_umount_key#32 &sb->s_type->i_lock_key#22 &lru->node[i].lock irq_context: 0 &type->s_umount_key#32 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#32 pool_lock#2 irq_context: 0 &type->s_umount_key#32 &obj_hash[i].lock pool_lock irq_context: 0 &type->s_umount_key#32 &journal->j_state_lock irq_context: 0 &type->s_umount_key#32 &p->alloc_lock irq_context: 0 &type->s_umount_key#32 (work_completion)(&sbi->s_error_work) irq_context: 0 &type->s_umount_key#32 &journal->j_state_lock irq_context: 0 &type->s_umount_key#32 key#3 irq_context: 0 &type->s_umount_key#32 key#4 irq_context: 0 &type->s_umount_key#32 &sbi->s_error_lock irq_context: 0 &type->s_umount_key#32 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#32 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#32 &base->lock irq_context: 0 &type->s_umount_key#32 &base->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#32 &fq->mq_flush_lock irq_context: 0 &type->s_umount_key#32 &fq->mq_flush_lock &q->requeue_lock irq_context: 0 &type->s_umount_key#32 &fq->mq_flush_lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#32 &fq->mq_flush_lock rcu_read_lock &pool->lock irq_context: 0 &type->s_umount_key#32 &fq->mq_flush_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#32 &fq->mq_flush_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &type->s_umount_key#32 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->s_umount_key#32 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#32 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#32 &rq->__lock irq_context: 0 &type->s_umount_key#32 bit_wait_table + i irq_context: 0 &type->s_umount_key#32 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &fq->mq_flush_lock bit_wait_table + i &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &type->s_umount_key#32 ext4_li_mtx irq_context: 0 &type->s_umount_key#32 ext4_li_mtx fs_reclaim irq_context: 0 &type->s_umount_key#32 ext4_li_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#32 ext4_li_mtx &____s->seqcount irq_context: 0 &type->s_umount_key#32 ext4_li_mtx pool_lock#2 irq_context: 0 &type->s_umount_key#32 ext4_li_mtx rcu_read_lock pool_lock#2 irq_context: 0 &type->s_umount_key#32 ext4_li_mtx &obj_hash[i].lock irq_context: 0 &type->s_umount_key#32 ext4_li_mtx batched_entropy_u16.lock irq_context: 0 &type->s_umount_key#32 ext4_li_mtx &eli->li_list_mtx irq_context: 0 &type->s_umount_key#32 ext4_li_mtx kthread_create_lock irq_context: 0 &type->s_umount_key#32 ext4_li_mtx &p->pi_lock irq_context: 0 &type->s_umount_key#32 ext4_li_mtx &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &type->s_umount_key#32 ext4_li_mtx &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#32 ext4_li_mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#32 ext4_li_mtx &rq->__lock irq_context: 0 &type->s_umount_key#32 ext4_li_mtx &x->wait irq_context: 0 &type->s_umount_key#32 ext4_li_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &eli->li_list_mtx irq_context: 0 &type->s_umount_key#32 (console_sem).lock irq_context: 0 &type->s_umount_key#32 console_lock console_srcu console_owner_lock irq_context: 0 &type->s_umount_key#32 console_lock console_srcu 
console_owner irq_context: 0 &type->s_umount_key#32 console_lock console_srcu console_owner &port_lock_key irq_context: 0 &type->s_umount_key#32 console_lock console_srcu console_owner console_owner_lock irq_context: 0 &type->s_umount_key#32 mount_lock irq_context: 0 &type->s_umount_key#32 mount_lock mount_lock.seqcount irq_context: 0 &type->s_umount_key#32 mount_lock mount_lock.seqcount &new_ns->poll irq_context: 0 namespace_sem irq_context: 0 namespace_sem &new_ns->ns_lock irq_context: 0 rcu_read_lock &pid->lock irq_context: 0 &sb->s_type->i_lock_key#23 &dentry->d_lock irq_context: 0 rename_lock.seqcount irq_context: 0 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 &dentry->d_lock &dentry->d_lock/1 &lru->node[i].lock irq_context: 0 &pid->lock irq_context: 0 sb_writers#4 tk_core.seq.seqcount irq_context: 0 sb_writers#4 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &____s->seqcount irq_context: 0 sb_writers#4 pool_lock#2 irq_context: 0 sb_writers#4 &c->lock irq_context: 0 sb_writers#4 &journal->j_state_lock irq_context: 0 sb_writers#4 &journal->j_state_lock irq_context: 0 sb_writers#4 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 jbd2_handle irq_context: 0 sb_writers#4 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 jbd2_handle &____s->seqcount irq_context: 0 sb_writers#4 jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 jbd2_handle &c->lock irq_context: 0 sb_writers#4 jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 jbd2_handle &journal->j_revoke_lock irq_context: 0 sb_writers#4 jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &wb->list_lock irq_context: 0 sb_writers#4 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &wb->work_lock irq_context: 0 sb_writers#4 &wb->work_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &wb->work_lock &base->lock irq_context: 0 sb_writers#4 &wb->work_lock &base->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rename_lock.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 
&type->i_mutex_dir_key#3 tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &s->s_inode_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &ei->xattr_sem irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &xa->xa_lock#7 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 lock#4 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &dd->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 bit_wait_table + i irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq bit_wait_table + i &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_revoke_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &meta_group_info[i]->alloc_sem irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle inode_hash_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle inode_hash_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle batched_entropy_u32.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &sb->s_type->i_lock_key#22 &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_wait_updates irq_context: 0 sb_internal rcu_read_lock 
init_fs.seq.seqcount irq_context: 0 sb_internal rcu_read_lock rcu_read_lock mount_lock.seqcount irq_context: 0 sb_internal rcu_read_lock rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_internal mmu_notifier_invalidate_range_start irq_context: 0 sb_internal pool_lock#2 irq_context: 0 sb_internal &journal->j_state_lock irq_context: 0 sb_internal jbd2_handle irq_context: 0 sb_internal jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_internal jbd2_handle pool_lock#2 irq_context: 0 sb_internal jbd2_handle &ret->b_state_lock irq_context: 0 sb_internal jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_internal jbd2_handle &journal->j_revoke_lock irq_context: 0 sb_internal jbd2_handle &mapping->private_lock irq_context: 0 sb_internal jbd2_handle &journal->j_wait_updates irq_context: 0 sb_internal &obj_hash[i].lock irq_context: 0 &ei->i_data_sem irq_context: 0 &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 &sighand->siglock hrtimer_bases.lock irq_context: 0 &sighand->siglock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 &sighand->siglock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 file_rwsem irq_context: 0 file_rwsem &ctx->flc_lock irq_context: 0 file_rwsem &ctx->flc_lock &fll->lock irq_context: 0 &ctx->flc_lock irq_context: 0 &sig->cred_guard_mutex tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex sb_writers#4 mount_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex sb_writers#4 mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex sb_writers#4 pool_lock#2 irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &journal->j_state_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &mapping->private_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle pool_lock#2 irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &ret->b_state_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &journal->j_revoke_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &ei->i_raw_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &journal->j_wait_updates irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &sb->s_type->i_lock_key#22 irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &wb->list_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock &c->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &iint->mutex tk_core.seq.seqcount irq_context: 0 &mm->mmap_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 jbd2_handle &mapping->private_lock irq_context: 0 tomoyo_ss quarantine_lock irq_context: 0 &mm->mmap_lock &meta->lock irq_context: 0 &p->lock &c->lock irq_context: 0 &p->lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rename_lock.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers 
&type->i_mutex_dir_key/1 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss &c->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss tomoyo_policy_lock &c->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss tomoyo_policy_lock &____s->seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &____s->seqcount irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &c->lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &____s->seqcount irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &c->lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &c->lock irq_context: 0 &iint->mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &iint->mutex tk_core.seq.seqcount irq_context: 0 &iint->mutex sb_writers#4 mount_lock irq_context: 0 &iint->mutex sb_writers#4 tk_core.seq.seqcount irq_context: 0 &iint->mutex sb_writers#4 mmu_notifier_invalidate_range_start irq_context: 0 &iint->mutex sb_writers#4 pool_lock#2 irq_context: 0 &iint->mutex sb_writers#4 &journal->j_state_lock irq_context: 0 &iint->mutex sb_writers#4 jbd2_handle irq_context: 0 &iint->mutex sb_writers#4 jbd2_handle &mapping->private_lock irq_context: 0 &iint->mutex sb_writers#4 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 &iint->mutex sb_writers#4 jbd2_handle pool_lock#2 irq_context: 0 &iint->mutex sb_writers#4 jbd2_handle &ret->b_state_lock irq_context: 0 &iint->mutex sb_writers#4 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 &iint->mutex sb_writers#4 jbd2_handle &journal->j_revoke_lock irq_context: 0 &iint->mutex sb_writers#4 jbd2_handle &ei->i_raw_lock irq_context: 0 &iint->mutex sb_writers#4 jbd2_handle &journal->j_wait_updates irq_context: 0 &iint->mutex sb_writers#4 &obj_hash[i].lock irq_context: 0 &iint->mutex sb_writers#4 &sb->s_type->i_lock_key#22 irq_context: 0 &iint->mutex sb_writers#4 &wb->list_lock irq_context: 0 &iint->mutex sb_writers#4 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 rcu_read_lock &p->alloc_lock irq_context: 0 &type->s_umount_key#33/1 irq_context: 0 &type->s_umount_key#33/1 fs_reclaim irq_context: 0 &type->s_umount_key#33/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#33/1 pool_lock#2 irq_context: 0 &type->s_umount_key#33/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#33/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#33/1 shrinker_rwsem 
irq_context: 0 &type->s_umount_key#33/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#33/1 sb_lock irq_context: 0 &type->s_umount_key#33/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#33/1 &c->lock irq_context: 0 &type->s_umount_key#33/1 &____s->seqcount irq_context: 0 &type->s_umount_key#33/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#33/1 &sb->s_type->i_lock_key#25 irq_context: 0 &type->s_umount_key#33/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#33/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#33/1 &sb->s_type->i_lock_key#25 &dentry->d_lock irq_context: 0 &type->s_umount_key#33/1 &sb->s_type->i_mutex_key#11 irq_context: 0 &type->s_umount_key#33/1 &sb->s_type->i_mutex_key#11 fs_reclaim irq_context: 0 &type->s_umount_key#33/1 &sb->s_type->i_mutex_key#11 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#33/1 &sb->s_type->i_mutex_key#11 pool_lock#2 irq_context: 0 &type->s_umount_key#33/1 &sb->s_type->i_mutex_key#11 &dentry->d_lock irq_context: 0 &type->s_umount_key#33/1 &sb->s_type->i_mutex_key#11 &c->lock irq_context: 0 &type->s_umount_key#33/1 &sb->s_type->i_mutex_key#11 &____s->seqcount irq_context: 0 &type->s_umount_key#33/1 &sb->s_type->i_mutex_key#11 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#33/1 &sb->s_type->i_mutex_key#11 &sb->s_type->i_lock_key#25 irq_context: 0 &type->s_umount_key#33/1 &sb->s_type->i_mutex_key#11 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#33/1 &sb->s_type->i_mutex_key#11 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#33/1 &sb->s_type->i_mutex_key#11 &sb->s_type->i_lock_key#25 &dentry->d_lock irq_context: 0 &type->s_umount_key#33/1 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#2 irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem rcu_read_lock mount_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem fs_reclaim irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem rename_lock irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem rename_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem mount_lock irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem mount_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem mount_lock mount_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem mount_lock mount_lock.seqcount &new_ns->poll irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem mount_lock mount_lock.seqcount &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem mount_lock mount_lock.seqcount rcu_read_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem mount_lock mount_lock.seqcount &obj_hash[i].lock irq_context: 0 &type->s_umount_key/1 fs_reclaim irq_context: 0 &type->s_umount_key/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem &c->lock irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem &____s->seqcount irq_context: 0 &type->s_umount_key#34 irq_context: 0 &type->s_umount_key#34 sb_lock irq_context: 0 &type->s_umount_key#34 fs_reclaim irq_context: 0 &type->s_umount_key#34 fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#34 pool_lock#2 irq_context: 0 &type->s_umount_key#34 &dentry->d_lock irq_context: 0 &type->s_umount_key#34 &lru->node[i].lock irq_context: 0 &type->s_umount_key#34 rcu_read_lock &dentry->d_lock irq_context: 0 &type->s_umount_key#34 &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#4 irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem rcu_read_lock mount_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem fs_reclaim irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem rename_lock irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem rename_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem mount_lock irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem mount_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem mount_lock mount_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem mount_lock mount_lock.seqcount &new_ns->poll irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem mount_lock mount_lock.seqcount &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem mount_lock mount_lock.seqcount rcu_read_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem mount_lock mount_lock.seqcount &obj_hash[i].lock irq_context: 0 &type->s_umount_key#35 irq_context: 0 &type->s_umount_key#35 sb_lock irq_context: 0 &type->s_umount_key#35 &dentry->d_lock irq_context: 0 rcu_read_lock &dentry->d_lock sysctl_lock irq_context: 0 &type->s_umount_key#36/1 irq_context: 0 &type->s_umount_key#36/1 fs_reclaim irq_context: 0 &type->s_umount_key#36/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#36/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#36/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#36/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#36/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#36/1 sb_lock irq_context: 0 &type->s_umount_key#36/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#36/1 pool_lock#2 irq_context: 0 &type->s_umount_key#36/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#36/1 &sb->s_type->i_lock_key#26 irq_context: 0 &type->s_umount_key#36/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#36/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#36/1 &sb->s_type->i_lock_key#26 &dentry->d_lock irq_context: 0 &type->s_umount_key#36/1 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem rcu_read_lock mount_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem rename_lock irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem rename_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem mount_lock irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem mount_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem mount_lock 
mount_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem mount_lock mount_lock.seqcount &new_ns->poll irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem mount_lock mount_lock.seqcount &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem mount_lock mount_lock.seqcount rcu_read_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem mount_lock mount_lock.seqcount &obj_hash[i].lock irq_context: 0 redirect_lock irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock fs_reclaim irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock pool_lock#2 irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &mm->mmap_lock irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &tty->termios_rwsem irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &tty->termios_rwsem &tty->write_wait irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &tty->termios_rwsem &ldata->output_lock irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &tty->termios_rwsem &ldata->output_lock &port_lock_key irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &tty->termios_rwsem &port_lock_key irq_context: 0 &tty->ldisc_sem &tty->write_wait irq_context: hardirq &i->lock &port_lock_key irq_context: hardirq &i->lock &port_lock_key &port->lock irq_context: hardirq &i->lock &port_lock_key &tty->write_wait irq_context: hardirq &i->lock &port_lock_key &tty->write_wait &p->pi_lock irq_context: 0 &type->s_umount_key#37/1 irq_context: 0 &type->s_umount_key#37/1 fs_reclaim irq_context: 0 &type->s_umount_key#37/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#37/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#37/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#37/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#37/1 &c->lock irq_context: 0 &type->s_umount_key#37/1 &____s->seqcount irq_context: 0 &type->s_umount_key#37/1 pool_lock#2 irq_context: 0 &type->s_umount_key#37/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#37/1 sb_lock irq_context: 0 &type->s_umount_key#37/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#37/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#37/1 &sb->s_type->i_lock_key#27 irq_context: 0 &type->s_umount_key#37/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#37/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#37/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#37/1 rcu_read_lock pool_lock#2 irq_context: 0 &type->s_umount_key#37/1 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#37/1 &sb->s_type->i_lock_key#27 &dentry->d_lock irq_context: 0 &type->s_umount_key#37/1 fuse_mutex irq_context: 0 &type->s_umount_key#37/1 &dentry->d_lock irq_context: 0 &type->s_umount_key#38/1 irq_context: 0 &type->s_umount_key#38/1 fs_reclaim irq_context: 0 &type->s_umount_key#38/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#38/1 pool_lock#2 irq_context: 0 &type->s_umount_key#38/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#38/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#38/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#38/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#38/1 sb_lock irq_context: 0 &type->s_umount_key#38/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#38/1 &c->lock irq_context: 0 &type->s_umount_key#38/1 &____s->seqcount 
irq_context: 0 &type->s_umount_key#38/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#38/1 &sb->s_type->i_lock_key#28 irq_context: 0 &type->s_umount_key#38/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#38/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#38/1 &sb->s_type->i_lock_key#28 &dentry->d_lock irq_context: 0 &type->s_umount_key#38/1 pstore_sb_lock irq_context: 0 &type->s_umount_key#38/1 &dentry->d_lock irq_context: 0 &type->s_umount_key#39/1 irq_context: 0 &type->s_umount_key#39/1 fs_reclaim irq_context: 0 &type->s_umount_key#39/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#39/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#39/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#39/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#39/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#39/1 sb_lock irq_context: 0 &type->s_umount_key#39/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#39/1 pool_lock#2 irq_context: 0 &type->s_umount_key#39/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#39/1 &sb->s_type->i_lock_key#29 irq_context: 0 &type->s_umount_key#39/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#39/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#39/1 &sb->s_type->i_lock_key#29 &dentry->d_lock irq_context: 0 &type->s_umount_key#39/1 bpf_preload_lock irq_context: 0 &type->s_umount_key#39/1 bpf_preload_lock (kmod_concurrent_max).lock irq_context: 0 &type->s_umount_key#39/1 bpf_preload_lock fs_reclaim irq_context: 0 &type->s_umount_key#39/1 bpf_preload_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#39/1 bpf_preload_lock pool_lock#2 irq_context: 0 &type->s_umount_key#39/1 bpf_preload_lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#39/1 bpf_preload_lock rcu_read_lock &pool->lock/1 irq_context: 0 &type->s_umount_key#39/1 bpf_preload_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#39/1 bpf_preload_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &type->s_umount_key#39/1 bpf_preload_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &type->s_umount_key#39/1 bpf_preload_lock &x->wait#17 irq_context: 0 &type->s_umount_key#39/1 bpf_preload_lock &rq->__lock irq_context: 0 &type->s_umount_key#39/1 bpf_preload_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock batched_entropy_u64.lock crngs.lock irq_context: 0 &mm->mmap_lock quarantine_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rq->__lock irq_context: 0 uts_sem irq_context: 0 &type->s_umount_key#39/1 bpf_preload_lock running_helpers_waitq.lock irq_context: 0 &type->s_umount_key#39/1 &dentry->d_lock irq_context: 0 &type->s_umount_key#14 irq_context: 0 &type->s_umount_key#14 sb_lock irq_context: 0 &type->s_umount_key#14 fs_reclaim irq_context: 0 &type->s_umount_key#14 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#14 &____s->seqcount irq_context: 0 &type->s_umount_key#14 pool_lock#2 irq_context: 0 &type->s_umount_key#14 rcu_read_lock pool_lock#2 irq_context: 0 &type->s_umount_key#14 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#14 &dentry->d_lock irq_context: 0 &type->s_umount_key#14 &lru->node[i].lock irq_context: 0 &type->s_umount_key#14 rcu_read_lock &dentry->d_lock irq_context: 0 rcu_read_lock &sb->s_type->i_lock_key irq_context: 0 
&type->i_mutex_dir_key#5 irq_context: 0 &type->i_mutex_dir_key#5 fs_reclaim irq_context: 0 &type->i_mutex_dir_key#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#5 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#5 rcu_read_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#5 &dentry->d_lock &wq irq_context: 0 sb_writers#5 irq_context: 0 sb_writers#5 mount_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rename_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 fs_reclaim irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &dentry->d_lock &wq#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &sbinfo->stat_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &sb->s_type->i_lock_key irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &s->s_inode_list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tk_core.seq.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 batched_entropy_u32.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &sb->s_type->i_lock_key &dentry->d_lock irq_context: 0 &sb->s_type->i_lock_key irq_context: 0 &sb->s_type->i_mutex_key#12 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem quarantine_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rename_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fs_reclaim irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &____s->seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &dentry->d_lock 
irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sbinfo->stat_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_lock_key irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &s->s_inode_list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tk_core.seq.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 batched_entropy_u32.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_lock_key &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#5 tk_core.seq.seqcount irq_context: 0 sb_writers#5 &sb->s_type->i_lock_key irq_context: 0 sb_writers#5 &wb->list_lock irq_context: 0 sb_writers#5 &wb->list_lock &sb->s_type->i_lock_key irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &obj_hash[i].lock pool_lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock rcu_read_lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock rcu_read_lock &c->lock irq_context: 0 &f->f_lock irq_context: 0 &sig->cred_guard_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 key#5 irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 &pcp->lock &zone->lock irq_context: 0 &type->i_mutex_dir_key#5 &c->lock irq_context: 0 &type->i_mutex_dir_key#5 &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#5 pool_lock#2 irq_context: 0 uts_sem irq_context: 0 uts_sem hostname_poll.wait.lock irq_context: 0 rcu_read_lock rcu_read_lock mount_lock.seqcount irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 mount_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 tk_core.seq.seqcount irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &journal->j_state_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &ei->i_raw_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_wait_updates irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &sb->s_type->i_lock_key#22 irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &wb->list_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 &fs->lock &dentry->d_lock irq_context: 0 dup_mmap_sem irq_context: 0 dup_mmap_sem &mm->mmap_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &c->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &____s->seqcount irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 pool_lock#2 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &vma->vm_lock->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mapping->i_mmap_rwsem irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 mmu_notifier_invalidate_range_start irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mm->page_table_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 ptlock_ptr(page) irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 ptlock_ptr(page)#2 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 ptlock_ptr(page)#2 ptlock_ptr(page)#2/1 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 ptlock_ptr(page)#2 key irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mm->context.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &obj_hash[i].lock irq_context: 0 &p->alloc_lock &memcg->mm_list.lock irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 lock#4 &lruvec->lru_lock irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 lock#5 irq_context: 0 &sig->cred_guard_mutex &iint->mutex &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &tsk->futex_exit_mutex &mm->mmap_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock delayed_uprobe_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock lock#4 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock lock#4 &lruvec->lru_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock lock#5 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock ptlock_ptr(page)#2 &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock ptlock_ptr(page)#2 pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock ptlock_ptr(page)#2 key irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &vma->vm_lock->lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &mapping->i_mmap_rwsem irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &c->lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock lock#4 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock lock#5 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &lruvec->lru_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &memcg->mm_list.lock 
irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 lock#4 &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem quarantine_lock irq_context: 0 &type->i_mutex_dir_key#5 rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#5 &dentry->d_lock &wq#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &xattrs->lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tk_core.seq.seqcount irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 fs_reclaim irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &____s->seqcount irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 pool_lock#2 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &xa->xa_lock#7 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 lock#4 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &info->lock irq_context: 0 &p->alloc_lock &x->wait#25 irq_context: 0 &p->alloc_lock &x->wait#25 &p->pi_lock irq_context: 0 &p->alloc_lock &x->wait#25 &p->pi_lock &rq->__lock irq_context: 0 &p->alloc_lock &x->wait#25 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tasklist_lock &sighand->siglock &p->pi_lock irq_context: 0 tasklist_lock &sighand->siglock &p->pi_lock &rq->__lock irq_context: 0 tasklist_lock &sighand->siglock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sighand->siglock &obj_hash[i].lock irq_context: 0 &sighand->siglock pool_lock#2 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rq->__lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &u->bindlock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &u->bindlock &net->unx.table.locks[i] irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &u->bindlock &net->unx.table.locks[i] &net->unx.table.locks[i]/1 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &u->bindlock &net->unx.table.locks[i] &net->unx.table.locks[i]/1 &dentry->d_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &u->bindlock &net->unx.table.locks[i]/1 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &u->bindlock &bsd_socket_locks[i] irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss &c->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 
0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss rcu_read_lock pool_lock#2 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tk_core.seq.seqcount irq_context: 0 rcu_read_lock mount_lock.seqcount irq_context: 0 &u->iolock irq_context: 0 &u->iolock rlock-AF_UNIX irq_context: 0 &ei->socket.wq.wait irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock pgd_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock key irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock pcpu_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock percpu_counters_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock pool_lock#2 irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &xa->xa_lock#7 &c->lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &xa->xa_lock#7 &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &xa->xa_lock#7 irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock lock#4 irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock lock#4 &lruvec->lru_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &ei->i_es_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &dd->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock rcu_read_lock &dd->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex &folio_wait_table[i] irq_context: 0 &sig->cred_guard_mutex &iint->mutex &rq->__lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &iint->mutex ima_extend_list_mutex &c->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex ima_extend_list_mutex &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &p->alloc_lock irq_context: 0 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 
&dentry->d_lock &wq#3 irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#9 &dentry->d_lock &lru->node[i].lock irq_context: 0 &bsd_socket_locks[i] irq_context: 0 sb_writers tk_core.seq.seqcount irq_context: 0 sb_writers &sb->s_type->i_lock_key#5 irq_context: 0 sb_writers &wb->list_lock irq_context: 0 sb_writers &wb->list_lock &sb->s_type->i_lock_key#5 irq_context: 0 &u->lock irq_context: 0 &u->lock &u->lock/1 irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 lock#4 &obj_hash[i].lock irq_context: 0 &group->mark_mutex irq_context: 0 &group->mark_mutex &fsnotify_mark_srcu irq_context: 0 &group->mark_mutex fs_reclaim irq_context: 0 &group->mark_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &group->mark_mutex &____s->seqcount irq_context: 0 &group->mark_mutex pool_lock#2 irq_context: 0 &group->mark_mutex &c->lock irq_context: 0 &group->mark_mutex lock irq_context: 0 &group->mark_mutex lock &group->inotify_data.idr_lock irq_context: 0 &group->mark_mutex lock &group->inotify_data.idr_lock pool_lock#2 irq_context: 0 &group->mark_mutex ucounts_lock irq_context: 0 &group->mark_mutex &rq->__lock irq_context: 0 &group->mark_mutex &mark->lock irq_context: 0 &group->mark_mutex &mark->lock &fsnotify_mark_srcu irq_context: 0 &group->mark_mutex &mark->lock &fsnotify_mark_srcu &conn->lock irq_context: 0 &group->mark_mutex &mark->lock &conn->lock irq_context: 0 &group->mark_mutex &conn->lock irq_context: 0 &group->mark_mutex &sb->s_type->i_lock_key#5 irq_context: 0 &group->mark_mutex &sb->s_type->i_lock_key#5 &dentry->d_lock irq_context: 0 &group->mark_mutex &sb->s_type->i_lock_key#5 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 &dentry->d_lock/1 irq_context: 0 &type->i_mutex_dir_key#2 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 &type->i_mutex_dir_key#2 rcu_read_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#2 tk_core.seq.seqcount irq_context: 0 &type->i_mutex_dir_key#2 sb_writers mount_lock irq_context: 0 &type->i_mutex_dir_key#2 sb_writers tk_core.seq.seqcount irq_context: 0 &type->i_mutex_dir_key#2 sb_writers &sb->s_type->i_lock_key#5 irq_context: 0 &type->i_mutex_dir_key#2 sb_writers &wb->list_lock irq_context: 0 &type->i_mutex_dir_key#2 sb_writers &wb->list_lock &sb->s_type->i_lock_key#5 irq_context: 0 &fsnotify_mark_srcu &conn->lock irq_context: 0 &conn->lock irq_context: 0 &evdev->client_lock irq_context: 0 &evdev->mutex irq_context: 0 &evdev->mutex &dev->mutex#2 irq_context: 0 &evdev->mutex &mm->mmap_lock irq_context: 0 pcpu_alloc_mutex rcu_node_0 irq_context: 0 rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pcpu_alloc_mutex &rcu_state.expedited_wq irq_context: 0 pcpu_alloc_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pcpu_alloc_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pcpu_alloc_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pcpu_alloc_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) pool_lock#2 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) 
rcu_state.exp_wake_mutex &rnp->exp_wq[2] &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex rcu_node_0 irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_node_0 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &rcu_state.expedited_wq irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &evdev->mutex &dev->mutex#2 &obj_hash[i].lock irq_context: 0 sk_lock-AF_NETLINK irq_context: 0 sk_lock-AF_NETLINK slock-AF_NETLINK irq_context: 0 slock-AF_NETLINK irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock rhashtable_bucket irq_context: 0 cb_lock irq_context: 0 cb_lock genl_mutex irq_context: 0 cb_lock genl_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rlock-AF_NETLINK irq_context: 0 cb_lock fs_reclaim irq_context: 0 cb_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock pool_lock#2 irq_context: 0 cb_lock rlock-AF_NETLINK irq_context: 0 rlock-AF_NETLINK irq_context: 0 &nlk->wait irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rhashtable_bucket irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex pool_lock#2 irq_context: 0 (wq_completion)events 
(work_completion)(&ht->run_work) &ht->mutex batched_entropy_u32.lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex rhashtable_bucket irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex rhashtable_bucket rhashtable_bucket/1 irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &ht->lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &ht->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &ht->lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 nl_table_lock irq_context: 0 &sb->s_type->i_mutex_key#10 nl_table_wait.lock irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_NETLINK irq_context: 0 &sb->s_type->i_mutex_key#10 genl_sk_destructing_waitq.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &nlk->wait irq_context: 0 &sb->s_type->i_mutex_key#10 wlock-AF_NETLINK irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) irq_context: 0 (wq_completion)events (work_completion)(&w->w) nfc_devlist_mutex irq_context: 0 (wq_completion)events (work_completion)(&w->w) nfc_devlist_mutex &k->list_lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) nfc_devlist_mutex &k->k_lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) nfc_devlist_mutex &genl_data->genl_data_mutex irq_context: 0 (wq_completion)events (work_completion)(&w->w) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rdev->beacon_registrations_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rdev->mgmt_registrations_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &wdev->pmsr_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem reg_indoor_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem hwsim_radio_lock irq_context: 0 nl_table_lock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &obj_hash[i].lock irq_context: 0 sb_writers#6 irq_context: 0 sb_writers#6 mount_lock irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss &c->lock irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss 
&____s->seqcount irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tk_core.seq.seqcount irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 &sb->s_type->i_lock_key#8 irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 &wb->list_lock irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 &wb->list_lock &sb->s_type->i_lock_key#8 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss quarantine_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &u->bindlock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &u->bindlock &net->unx.table.locks[i] irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &u->bindlock &net->unx.table.locks[i] &net->unx.table.locks[i]/1 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &u->bindlock &net->unx.table.locks[i] &net->unx.table.locks[i]/1 &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &u->bindlock &bsd_socket_locks[i] irq_context: 0 &u->lock &sk->sk_peer_lock irq_context: 0 &u->lock rlock-AF_UNIX irq_context: 0 rcu_read_lock &ei->socket.wq.wait irq_context: 0 rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &u->iolock &mm->mmap_lock irq_context: 0 &u->iolock &obj_hash[i].lock irq_context: 0 &u->iolock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &sb->s_type->i_lock_key irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &wb->list_lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &wb->list_lock &sb->s_type->i_lock_key irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss tomoyo_policy_lock &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss tomoyo_policy_lock &____s->seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 &group->notification_waitq irq_context: 0 &group->notification_lock irq_context: 0 &client->wait irq_context: softirq rcu_callback rlock-AF_NETLINK irq_context: softirq rcu_callback &dir->lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &rq->__lock 
irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_es_lock key#6 irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 &p->lock &pcp->lock &zone->lock irq_context: 0 &p->lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 syslog_lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &xa->xa_lock#7 pool_lock#2 irq_context: 0 rcu_read_lock &rcu_state.gp_wq irq_context: 0 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_lock_key#14 irq_context: 0 &sb->s_type->i_lock_key#14 &dentry->d_lock irq_context: 0 &pipe->mutex/1 irq_context: 0 &pipe->rd_wait irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 lock#4 &lruvec->lru_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &pcp->lock &zone->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 lock#5 irq_context: 0 sb_writers &type->i_mutex_dir_key#2 irq_context: 0 sb_writers &type->i_mutex_dir_key#2 rename_lock.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key#2 rcu_read_lock &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex &stopper->lock irq_context: 0 &sig->cred_guard_mutex &stop_pi_lock irq_context: 0 &sig->cred_guard_mutex &stop_pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &x->wait#8 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &c->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &____s->seqcount irq_context: 0 &u->iolock quarantine_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &meta->lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem kfence_freelist_lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &pcp->lock &zone->lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &u->lock clock-AF_UNIX irq_context: 0 &u->peer_wait irq_context: 0 rlock-AF_UNIX irq_context: 0 &iint->mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 key#9 irq_context: 0 &pipe->mutex/1 &pipe->rd_wait irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &p->pi_lock irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &rq->__lock irq_context: 0 &pipe->mutex/1 &lock->wait_lock irq_context: 0 &pipe->mutex/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &pipe->wr_wait irq_context: 0 &lock->wait_lock irq_context: 0 &u->iolock &rq->__lock irq_context: 0 &pipe->mutex/1 fs_reclaim irq_context: 0 
&pipe->mutex/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &pipe->mutex/1 &____s->seqcount irq_context: 0 &pipe->mutex/1 pool_lock#2 irq_context: 0 &pipe->mutex/1 &mm->mmap_lock irq_context: 0 &pipe->rd_wait &p->pi_lock irq_context: 0 &pipe->rd_wait &p->pi_lock &rq->__lock irq_context: 0 &pipe->rd_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->rd_wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#7 tk_core.seq.seqcount irq_context: 0 sb_writers#7 mount_lock irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 lock#4 rcu_read_lock &____s->seqcount irq_context: 0 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 tasklist_lock &sighand->siglock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &u->iolock &meta->lock irq_context: 0 &u->iolock kfence_freelist_lock irq_context: 0 &mm->mmap_lock &n->list_lock irq_context: 0 &mm->mmap_lock lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock lock#4 &obj_hash[i].lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &rq->__lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 syslog_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12/4 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12/4 &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12/4 tk_core.seq.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12/4 rename_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 &____s->seqcount#4 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 &____s->seqcount#4 &____s->seqcount#4/1 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount &dentry->d_lock/2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount &dentry->d_lock/2 &dentry->d_lock/3 irq_context: 0 &u->lock &u->peer_wait irq_context: 0 &u->iolock &u->peer_wait irq_context: 0 &u->iolock &u->peer_wait &p->pi_lock irq_context: 0 &u->iolock &u->peer_wait &p->pi_lock &rq->__lock irq_context: 0 &u->iolock &u->peer_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#3 &xa->xa_lock#7 &c->lock irq_context: 0 &type->i_mutex_dir_key#3 &xa->xa_lock#7 &____s->seqcount irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &c->lock 
irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &____s->seqcount irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rename_lock.seqcount irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &dentry->d_lock sysctl_lock irq_context: 0 sb_writers#3 &dentry->d_lock irq_context: 0 sb_writers#3 tomoyo_ss irq_context: 0 sb_writers#3 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#3 tomoyo_ss rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#3 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#3 tomoyo_ss &c->lock irq_context: 0 sb_writers#3 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#3 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#3 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#3 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tk_core.seq.seqcount irq_context: 0 sb_writers#3 &mm->mmap_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock rcu_read_lock &c->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock rcu_read_lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &iint->mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &xa->xa_lock#7 batched_entropy_u8.lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &xa->xa_lock#7 kfence_freelist_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &xa->xa_lock#7 pool_lock#2 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem ptlock_ptr(page) irq_context: 0 &sb->s_type->i_mutex_key#9 &dentry->d_lock &wq irq_context: 0 &sb->s_type->i_mutex_key#9 &obj_hash[i].lock irq_context: 0 sk_lock-AF_UNIX irq_context: 0 sk_lock-AF_UNIX slock-AF_UNIX irq_context: 0 slock-AF_UNIX irq_context: hardirq log_wait.lock &p->pi_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &ret->b_state_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_revoke_lock irq_context: 0 &ei->xattr_sem irq_context: 0 &type->i_mutex_dir_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_es_lock key#6 irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 &type->i_mutex_dir_key#3 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &xattrs->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &n->list_lock irq_context: 0 
dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &n->list_lock &c->lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 fs_reclaim irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &dentry->d_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &s->s_inode_list_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &p->alloc_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock &wq#2 irq_context: 0 sb_writers#3 oom_adj_mutex irq_context: 0 sb_writers#3 oom_adj_mutex rcu_read_lock &p->alloc_lock irq_context: 0 sb_writers#3 oom_adj_mutex &p->alloc_lock irq_context: 0 low_water_lock console_owner_lock irq_context: 0 low_water_lock console_owner irq_context: 0 &group->mark_mutex &sb->s_type->i_lock_key#22 irq_context: 0 &group->mark_mutex &sb->s_type->i_lock_key#22 &dentry->d_lock irq_context: 0 &group->mark_mutex &sb->s_type->i_lock_key#22 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 &group->mark_mutex &sb->s_type->i_lock_key irq_context: 0 &group->mark_mutex &sb->s_type->i_lock_key &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock pool_lock#2 irq_context: 0 &sk->sk_peer_lock irq_context: 0 &ep->mtx irq_context: 0 epnested_mutex irq_context: 0 epnested_mutex &ep->mtx irq_context: 0 epnested_mutex &ep->mtx fs_reclaim irq_context: 0 epnested_mutex &ep->mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 epnested_mutex &ep->mtx &____s->seqcount irq_context: 0 epnested_mutex &ep->mtx pool_lock#2 irq_context: 0 epnested_mutex &ep->mtx &c->lock irq_context: 0 epnested_mutex &ep->mtx &f->f_lock irq_context: 0 epnested_mutex &ep->mtx &ei->socket.wq.wait irq_context: 0 epnested_mutex &ep->mtx &ep->lock irq_context: 0 epnested_mutex rcu_read_lock &f->f_lock irq_context: 0 &ep->mtx fs_reclaim irq_context: 0 &ep->mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &ep->mtx &f->f_lock irq_context: 0 &ep->mtx pool_lock#2 irq_context: 0 &ep->mtx &group->notification_waitq irq_context: 0 &ep->mtx &group->notification_lock irq_context: 0 &ep->mtx &ep->lock irq_context: 0 &ep->mtx &sighand->signalfd_wqh irq_context: 0 &ep->mtx &sighand->siglock irq_context: 0 &ep->mtx &ei->socket.wq.wait irq_context: 0 &ep->lock irq_context: hardirq log_wait.lock &p->pi_lock &rq->__lock irq_context: hardirq log_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &c->lock irq_context: 0 &mm->mmap_lock &c->lock batched_entropy_u8.lock irq_context: 0 &mm->mmap_lock &c->lock kfence_freelist_lock irq_context: 0 &type->i_mutex_dir_key#4 
rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#4 &sb->s_type->i_lock_key#24 &dentry->d_lock &wq#2 irq_context: 0 &type->i_mutex_dir_key#4 &mm->mmap_lock fs_reclaim irq_context: 0 &type->i_mutex_dir_key#4 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#4 &mm->mmap_lock &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#4 &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &type->i_mutex_dir_key#4 &mm->mmap_lock ptlock_ptr(page)#2 lock#4 irq_context: 0 &type->i_mutex_dir_key#4 tk_core.seq.seqcount irq_context: 0 &type->i_mutex_dir_key#4 &c->lock irq_context: 0 &type->i_mutex_dir_key#4 &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &c->lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &pcp->lock &zone->lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#4 &pcp->lock &zone->lock irq_context: 0 &type->i_mutex_dir_key#4 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#4 sb_writers#8 mount_lock irq_context: 0 &type->i_mutex_dir_key#4 sb_writers#8 tk_core.seq.seqcount irq_context: 0 &type->i_mutex_dir_key#4 sb_writers#8 &sb->s_type->i_lock_key#24 irq_context: 0 &type->i_mutex_dir_key#4 sb_writers#8 &wb->list_lock irq_context: 0 &type->i_mutex_dir_key#4 sb_writers#8 &wb->list_lock &sb->s_type->i_lock_key#24 irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 remove_cache_srcu irq_context: 0 remove_cache_srcu quarantine_lock irq_context: 0 remove_cache_srcu &c->lock irq_context: 0 remove_cache_srcu &n->list_lock irq_context: 0 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 remove_cache_srcu &obj_hash[i].lock irq_context: 0 tomoyo_ss &n->list_lock irq_context: 0 tomoyo_ss &n->list_lock &c->lock irq_context: 0 &vma->vm_lock->lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 swap_lock irq_context: 0 sb_writers#8 irq_context: 0 sb_writers#8 mount_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rename_lock.seqcount irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 fs_reclaim irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &dentry->d_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem inode_hash_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem fs_reclaim irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &c->lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &____s->seqcount irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#24 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem inode_hash_lock 
&s->s_inode_list_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &sb->s_type->i_lock_key#24 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &sb->s_type->i_lock_key#24 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &sb->s_type->i_lock_key#24 &dentry->d_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &sb->s_type->i_lock_key#24 &dentry->d_lock &wq#2 irq_context: 0 kn->active fs_reclaim irq_context: 0 kn->active fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active pool_lock#2 irq_context: 0 kn->active &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active &kernfs_locks->open_file_mutex[count] pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#13 irq_context: 0 sb_writers#8 fs_reclaim irq_context: 0 sb_writers#8 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 pool_lock#2 irq_context: 0 sb_writers#8 &mm->mmap_lock irq_context: 0 sb_writers#8 &of->mutex irq_context: 0 sb_writers#8 &of->mutex kn->active &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active &obj_hash[i].lock irq_context: 0 sb_writers#8 &obj_hash[i].lock irq_context: 0 &kernfs_locks->open_file_mutex[count] irq_context: 0 &kernfs_locks->open_file_mutex[count] &obj_hash[i].lock irq_context: 0 &kernfs_locks->open_file_mutex[count] pool_lock#2 irq_context: 0 &kernfs_locks->open_file_mutex[count] krc.lock irq_context: 0 &kernfs_locks->open_file_mutex[count] krc.lock &obj_hash[i].lock irq_context: 0 &kernfs_locks->open_file_mutex[count] krc.lock &base->lock irq_context: 0 &kernfs_locks->open_file_mutex[count] krc.lock &base->lock &obj_hash[i].lock irq_context: 0 kn->active#2 fs_reclaim irq_context: 0 kn->active#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] 
irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#2 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#2 pool_lock#2 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 &obj_hash[i].lock irq_context: 0 kn->active#2 &c->lock irq_context: 0 kn->active#2 &____s->seqcount irq_context: 0 kn->active#2 pool_lock#2 irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 &mm->mmap_lock &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem pool_lock#2 irq_context: 0 kn->active &c->lock irq_context: 0 &type->i_mutex_dir_key#3 &n->list_lock irq_context: 0 &type->i_mutex_dir_key#3 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &c->lock irq_context: 0 &type->i_mutex_dir_key#5 tk_core.seq.seqcount irq_context: 0 &type->i_mutex_dir_key#5 sb_writers#5 mount_lock irq_context: 0 &type->i_mutex_dir_key#5 sb_writers#5 tk_core.seq.seqcount irq_context: 0 &type->i_mutex_dir_key#5 sb_writers#5 &sb->s_type->i_lock_key irq_context: 0 &type->i_mutex_dir_key#5 sb_writers#5 &wb->list_lock irq_context: 0 &type->i_mutex_dir_key#5 sb_writers#5 &wb->list_lock &sb->s_type->i_lock_key irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &____s->seqcount irq_context: 0 kn->active remove_cache_srcu irq_context: 0 kn->active remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 &of->mutex kn->active &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active remove_cache_srcu irq_context: 0 sb_writers#8 &of->mutex kn->active remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 &of->mutex kn->active remove_cache_srcu &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex &____s->seqcount irq_context: 0 &kernfs_locks->open_file_mutex[count] &obj_hash[i].lock pool_lock irq_context: 0 kn->active 
&kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 sb_writers#8 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex &____s->seqcount irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#2 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 &____s->seqcount irq_context: 0 kn->active#2 &n->list_lock irq_context: 0 kn->active#2 &n->list_lock &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 batched_entropy_u32.lock crngs.lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu &c->lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#8 &n->list_lock irq_context: 0 sb_writers#8 &n->list_lock &c->lock irq_context: 0 sb_writers#8 remove_cache_srcu irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu irq_context: 0 sb_writers#8 remove_cache_srcu quarantine_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 remove_cache_srcu &c->lock irq_context: 0 sb_writers#8 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#8 remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 sb_writers#8 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &vma->vm_lock->lock &rq->__lock irq_context: 0 &vma->vm_lock->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &n->list_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &n->list_lock &c->lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss &c->lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#2 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &kernfs_locks->open_file_mutex[count] 
fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &kernfs_locks->open_file_mutex[count] fill_pool_map-wait-type-override pool_lock irq_context: 0 &ep->mtx &____s->seqcount irq_context: 0 &ep->mtx &c->lock irq_context: 0 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 quarantine_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &c->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &n->list_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &obj_hash[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &c->lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 tomoyo_ss &rq->__lock irq_context: 0 &ep->mtx &rq->__lock irq_context: 0 &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &folio_wait_table[i] irq_context: 0 sb_writers#8 &of->mutex kn->active quarantine_lock irq_context: 0 sb_writers#8 &rq->__lock irq_context: 0 &type->i_mutex_dir_key#5 &n->list_lock irq_context: 0 &type->i_mutex_dir_key#5 &n->list_lock &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &sem->wait_lock irq_context: 0 sb_writers#8 &mm->mmap_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &rq->__lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &rq->__lock irq_context: 0 &kernfs_locks->open_file_mutex[count] fill_pool_map-wait-type-override &c->lock irq_context: 0 &kernfs_locks->open_file_mutex[count] fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 remove_cache_srcu irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 remove_cache_srcu &c->lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 remove_cache_srcu &obj_hash[i].lock irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active &n->list_lock &c->lock irq_context: 0 kn->active &n->list_lock irq_context: 0 kn->active &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active &rq->__lock irq_context: 0 kn->active#2 remove_cache_srcu irq_context: 0 kn->active#2 remove_cache_srcu quarantine_lock irq_context: 0 kn->active#2 remove_cache_srcu &c->lock irq_context: 0 kn->active#2 remove_cache_srcu &n->list_lock irq_context: 0 kn->active#2 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 kn->active#2 remove_cache_srcu 
&obj_hash[i].lock irq_context: 0 remove_cache_srcu &rq->__lock irq_context: 0 &type->i_mutex_dir_key#5 rcu_read_lock &dentry->d_lock irq_context: 0 tomoyo_ss remove_cache_srcu irq_context: 0 tomoyo_ss remove_cache_srcu quarantine_lock irq_context: 0 tomoyo_ss remove_cache_srcu &c->lock irq_context: 0 tomoyo_ss remove_cache_srcu &n->list_lock irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 tomoyo_ss remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#8 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#5 &obj_hash[i].lock irq_context: 0 sb_writers#8 remove_cache_srcu &rq->__lock irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] &n->list_lock irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] &n->list_lock &c->lock irq_context: 0 kn->active#2 &rq->__lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] fs_reclaim &rq->__lock irq_context: 0 &type->i_mutex_dir_key#5 batched_entropy_u8.lock irq_context: 0 &type->i_mutex_dir_key#5 kfence_freelist_lock irq_context: 0 &root->kernfs_rwsem &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 kn->active#2 fs_reclaim &rq->__lock irq_context: 0 tomoyo_ss batched_entropy_u8.lock irq_context: 0 tomoyo_ss kfence_freelist_lock irq_context: 0 tomoyo_ss &meta->lock irq_context: 0 &kernfs_locks->open_file_mutex[count] &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex &n->list_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] rcu_read_lock rcu_node_0 irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rq->__lock irq_context: 0 tomoyo_ss remove_cache_srcu &rq->__lock irq_context: 0 kn->active#2 rcu_read_lock rcu_node_0 irq_context: 0 kn->active#2 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex remove_cache_srcu irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#2 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 batched_entropy_u8.lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) krc.lock &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#5 remove_cache_srcu irq_context: 0 &type->i_mutex_dir_key#5 remove_cache_srcu quarantine_lock irq_context: 0 kn->active#2 remove_cache_srcu &rq->__lock irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex rcu_node_0 irq_context: 0 &kernfs_locks->open_file_mutex[count] fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 &kernfs_locks->open_file_mutex[count] 
fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) krc.lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback &obj_hash[i].lock pool_lock irq_context: 0 &kernfs_locks->open_file_mutex[count] fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 kn->active#3 fs_reclaim irq_context: 0 kn->active#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#3 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#3 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#3 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#3 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#3 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex &____s->seqcount irq_context: 0 kn->active#3 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#3 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 kn->active#3 &c->lock irq_context: 0 kn->active#3 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#3 &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 quarantine_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 kn->active#3 &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 &n->list_lock irq_context: 0 sb_writers#8 
&of->mutex kn->active#3 &n->list_lock &c->lock irq_context: 0 kn->active#3 &n->list_lock irq_context: 0 kn->active#3 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex fs_reclaim &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 remove_cache_srcu irq_context: 0 sb_writers#8 &of->mutex kn->active#3 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 remove_cache_srcu &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_node_0 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 kn->active#3 fs_reclaim &rq->__lock irq_context: 0 &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#3 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#8 rcu_node_0 irq_context: 0 sb_writers#8 rcu_read_lock rcu_node_0 irq_context: 0 kn->active#3 &kernfs_locks->open_file_mutex[count] &rq->__lock irq_context: 0 kn->active#3 rcu_read_lock rcu_node_0 irq_context: 0 kn->active#3 rcu_read_lock &rq->__lock irq_context: 0 kn->active#3 remove_cache_srcu irq_context: 0 kn->active#3 remove_cache_srcu quarantine_lock irq_context: 0 kn->active#3 remove_cache_srcu &c->lock irq_context: 0 kn->active#3 remove_cache_srcu &n->list_lock irq_context: 0 kn->active#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 kn->active#3 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 kn->active#3 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex remove_cache_srcu irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex remove_cache_srcu &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex remove_cache_srcu 
rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#13 &rq->__lock irq_context: 0 kn->active#3 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 kn->active#4 fs_reclaim irq_context: 0 kn->active#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#4 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#4 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#4 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock &of->mutex irq_context: 0 &p->lock &of->mutex kn->active#4 param_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rename_lock.seqcount irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem irq_context: 0 sb_writers#8 &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &dentry->d_lock irq_context: 0 sb_writers#8 tomoyo_ss irq_context: 0 sb_writers#8 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#8 kn->active#4 fs_reclaim irq_context: 0 sb_writers#8 kn->active#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 kn->active#4 &kernfs_locks->open_file_mutex[count] irq_context: 0 sb_writers#8 kn->active#4 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 sb_writers#8 kn->active#4 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 irq_context: 0 sb_writers#8 tomoyo_ss &c->lock irq_context: 0 sb_writers#8 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#8 iattr_mutex irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 tk_core.seq.seqcount irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem iattr_mutex irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem iattr_mutex fs_reclaim irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem iattr_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem iattr_mutex &____s->seqcount irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem iattr_mutex &rq->__lock irq_context: 0 &root->kernfs_iattr_rwsem &sem->wait_lock irq_context: 0 &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem iattr_mutex pool_lock#2 irq_context: 0 &root->kernfs_iattr_rwsem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem iattr_mutex &c->lock irq_context: 0 &root->kernfs_iattr_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem iattr_mutex tk_core.seq.seqcount irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &sem->wait_lock 
irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &p->pi_lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#4 param_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#4 param_lock disk_events_mutex irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &rq->__lock irq_context: 0 &mm->mmap_lock remove_cache_srcu irq_context: 0 &mm->mmap_lock remove_cache_srcu quarantine_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &c->lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &n->list_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_lock_key#24 irq_context: 0 &type->i_mutex_dir_key#4 &n->list_lock irq_context: 0 &type->i_mutex_dir_key#4 &n->list_lock &c->lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &rq->__lock irq_context: 0 &type->i_mutex_dir_key#4 &rq->__lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu quarantine_lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu &c->lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu &n->list_lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu &rq->__lock irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#5 &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#5 &sem->wait_lock irq_context: 0 &type->i_mutex_dir_key#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sem->wait_lock irq_context: 0 sb_writers#5 &sem->wait_lock irq_context: 0 sb_writers#5 &p->pi_lock irq_context: 0 sb_writers#5 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &rq->__lock irq_context: 0 sb_writers#8 tk_core.seq.seqcount irq_context: 0 sb_writers#8 &sb->s_type->i_lock_key#24 irq_context: 0 sb_writers#8 &wb->list_lock irq_context: 0 sb_writers#8 &wb->list_lock &sb->s_type->i_lock_key#24 irq_context: 0 &type->i_mutex_dir_key#4 rcu_read_lock rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#4 rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#4 rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#4 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock 
irq_context: 0 &type->i_mutex_dir_key#5 &pcp->lock &zone->lock irq_context: 0 &type->i_mutex_dir_key#5 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem batched_entropy_u8.lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem kfence_freelist_lock irq_context: 0 &type->i_mutex_dir_key#4 remove_cache_srcu irq_context: 0 &type->i_mutex_dir_key#4 remove_cache_srcu quarantine_lock irq_context: 0 &type->i_mutex_dir_key#4 remove_cache_srcu &c->lock irq_context: 0 &type->i_mutex_dir_key#4 remove_cache_srcu &n->list_lock irq_context: 0 &type->i_mutex_dir_key#4 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#4 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#4 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 tk_core.seq.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &fsnotify_mark_srcu irq_context: 0 sb_writers#5 &s->s_inode_list_lock irq_context: 0 sb_writers#5 &info->lock irq_context: 0 sb_writers#5 &sbinfo->stat_lock irq_context: 0 sb_writers#5 &xa->xa_lock#7 irq_context: 0 sb_writers#5 &obj_hash[i].lock irq_context: 0 sb_writers#5 pool_lock#2 irq_context: 0 sb_writers#5 &fsnotify_mark_srcu irq_context: 0 &type->s_umount_key#32 sb_writers#4 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#32 sb_writers#4 &____s->seqcount irq_context: 0 &type->s_umount_key#32 sb_writers#4 pool_lock#2 irq_context: 0 &type->s_umount_key#32 sb_writers#4 &xa->xa_lock#7 irq_context: 0 &type->s_umount_key#32 sb_writers#4 lock#4 irq_context: 0 &type->s_umount_key#32 sb_writers#4 &mapping->private_lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#32 sb_writers#4 &dd->lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 &c->lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 rcu_read_lock &dd->lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 rcu_read_lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 rcu_read_lock pool_lock#2 irq_context: 0 &type->s_umount_key#32 sb_writers#4 rcu_read_lock tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#32 sb_writers#4 rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 &xa->xa_lock#7 pool_lock#2 irq_context: 0 &type->s_umount_key#32 sb_writers#4 lock#4 &lruvec->lru_lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 bit_wait_table + i irq_context: 0 &type->s_umount_key#32 sb_writers#4 &rq->__lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#32 &eli->li_list_mtx irq_context: 0 &type->s_umount_key#32 sb_writers#4 &journal->j_state_lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle irq_context: 0 &type->s_umount_key#32 
sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem pool_lock#2 irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &dd->lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &x->wait#26 irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &obj_hash[i].lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem rcu_read_lock &pool->lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem rcu_node_0 irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &rq->__lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock pool_lock#2 irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem rcu_read_lock rcu_node_0 irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &base->lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &base->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &x->wait#26 irq_context: softirq &x->wait#26 &p->pi_lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem (&timer.timer) irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &fq->mq_flush_lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &fq->mq_flush_lock tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &fq->mq_flush_lock &q->requeue_lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &fq->mq_flush_lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &fq->mq_flush_lock rcu_read_lock &pool->lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &fq->mq_flush_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle 
&meta_group_info[i]->alloc_sem &fq->mq_flush_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &fq->mq_flush_lock &x->wait#26 irq_context: softirq &fq->mq_flush_lock &x->wait#26 &p->pi_lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &journal->j_wait_updates irq_context: 0 &type->s_umount_key#32 sb_writers#4 &obj_hash[i].lock irq_context: 0 kn->active#5 fs_reclaim irq_context: 0 kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#5 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#5 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &obj_hash[i].lock irq_context: 0 kn->active#5 &c->lock irq_context: 0 &p->lock &of->mutex kn->active#5 fs_reclaim irq_context: 0 &p->lock &of->mutex kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock &of->mutex kn->active#5 &c->lock irq_context: 0 &p->lock &of->mutex kn->active#5 &____s->seqcount irq_context: 0 &p->lock &of->mutex kn->active#5 pool_lock#2 irq_context: 0 &p->lock &of->mutex kn->active#5 &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#5 
uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu &obj_hash[i].lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 kn->active#5 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &n->list_lock &c->lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim &rq->__lock irq_context: 0 &p->lock remove_cache_srcu irq_context: 0 &p->lock remove_cache_srcu quarantine_lock irq_context: 0 kn->active#6 fs_reclaim irq_context: 0 kn->active#6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#6 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#6 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#6 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#6 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#7 fs_reclaim irq_context: 0 kn->active#7 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#7 &c->lock irq_context: 0 kn->active#7 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#7 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#7 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#8 fs_reclaim irq_context: 0 kn->active#8 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#8 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#8 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#8 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#8 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#9 fs_reclaim irq_context: 0 kn->active#9 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#9 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#9 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#9 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock &n->list_lock irq_context: 0 &p->lock &n->list_lock &c->lock irq_context: 0 kn->active#10 fs_reclaim irq_context: 0 kn->active#10 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#10 &kernfs_locks->open_file_mutex[count] 
irq_context: 0 kn->active#10 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#10 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#5 &n->list_lock irq_context: 0 kn->active#5 &n->list_lock &c->lock irq_context: 0 kn->active#11 fs_reclaim irq_context: 0 kn->active#11 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#11 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#11 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#11 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &____s->seqcount irq_context: 0 kn->active#9 &c->lock irq_context: 0 kn->active#9 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#9 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 kn->active#11 &c->lock irq_context: 0 &type->i_mutex_dir_key#4 fs_reclaim &rq->__lock irq_context: 0 kn->active#12 fs_reclaim irq_context: 0 kn->active#12 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#12 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#12 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#12 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock &of->mutex kn->active#5 &n->list_lock irq_context: 0 &p->lock &of->mutex kn->active#5 &n->list_lock &c->lock irq_context: 0 &p->lock &of->mutex kn->active#5 quarantine_lock irq_context: 0 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#12 &c->lock irq_context: 0 kn->active#13 fs_reclaim irq_context: 0 kn->active#13 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#13 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#13 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#13 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock &of->mutex kn->active#5 remove_cache_srcu irq_context: 0 &p->lock &of->mutex kn->active#5 remove_cache_srcu quarantine_lock irq_context: 0 &p->lock &of->mutex kn->active#5 remove_cache_srcu &c->lock irq_context: 0 &p->lock &of->mutex kn->active#5 remove_cache_srcu &n->list_lock irq_context: 0 &p->lock &of->mutex kn->active#5 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &p->lock &of->mutex kn->active#5 remove_cache_srcu &obj_hash[i].lock irq_context: 0 kn->active#14 fs_reclaim irq_context: 0 kn->active#14 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#14 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#14 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#14 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock &of->mutex kn->active#14 fs_reclaim irq_context: 0 &p->lock &of->mutex kn->active#14 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock &of->mutex kn->active#14 pool_lock#2 irq_context: 0 &p->lock &of->mutex kn->active#14 &obj_hash[i].lock irq_context: 0 &p->lock &of->mutex kn->active#14 &c->lock irq_context: 0 &p->lock remove_cache_srcu &c->lock irq_context: 0 &p->lock remove_cache_srcu &n->list_lock irq_context: 0 &p->lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &p->lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 
kn->active#9 &n->list_lock irq_context: 0 kn->active#9 &n->list_lock &c->lock irq_context: 0 kn->active#14 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#14 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 kn->active#7 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &device->physical_node_lock irq_context: 0 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rnp->exp_lock irq_context: 0 rcu_state.exp_mutex irq_context: 0 rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: softirq &x->wait#26 &p->pi_lock &rq->__lock irq_context: softirq &x->wait#26 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &x->wait#26 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 udc_lock irq_context: 0 kn->active#5 remove_cache_srcu irq_context: 0 kn->active#5 remove_cache_srcu quarantine_lock irq_context: 0 kn->active#5 remove_cache_srcu &c->lock irq_context: 0 kn->active#5 remove_cache_srcu &n->list_lock irq_context: 0 kn->active#5 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &n->list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &n->list_lock &c->lock irq_context: 0 rcu_state.exp_mutex.wait_lock irq_context: 0 kn->active#11 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#11 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &dentry->d_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu fs_reclaim irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu pool_lock#2 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu &group->notification_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu &group->notification_waitq irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu &group->notification_waitq &p->pi_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu &group->notification_waitq &p->pi_lock &rq->__lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu &group->notification_waitq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss &n->list_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 fw_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu &____s->seqcount irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu &c->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu &obj_hash[i].lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] remove_cache_srcu irq_context: 0 kn->active#5 
&kernfs_locks->open_file_mutex[count] remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex remove_cache_srcu irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex remove_cache_srcu quarantine_lock irq_context: 0 tomoyo_ss rcu_read_lock rename_lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &rq->__lock irq_context: 0 kn->active#5 &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &obj_hash[i].lock pool_lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#5 rcu_read_lock &rq->__lock irq_context: 0 kn->active#5 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &n->list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &n->list_lock &c->lock irq_context: 0 &p->lock &rq->__lock irq_context: 0 &p->lock &of->mutex &rq->__lock irq_context: 0 &p->lock &of->mutex kn->active#5 &device->physical_node_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 quarantine_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sem->wait_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 kn->active#15 fs_reclaim irq_context: 0 kn->active#15 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#15 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#15 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#15 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock &of->mutex kn->active#15 dev_base_lock irq_context: 0 kn->active#16 fs_reclaim irq_context: 0 kn->active#16 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#16 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#16 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#16 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock &of->mutex kn->active#16 dev_base_lock irq_context: 0 kn->active#17 fs_reclaim irq_context: 0 kn->active#17 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#17 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#17 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#17 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#18 fs_reclaim irq_context: 0 kn->active#18 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#18 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#18 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#18 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start 
irq_context: 0 &p->lock &of->mutex kn->active#18 dev_base_lock irq_context: 0 kn->active#19 fs_reclaim irq_context: 0 kn->active#19 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#19 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#19 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#19 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock &of->mutex kn->active#19 dev_base_lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 kn->active#20 fs_reclaim irq_context: 0 kn->active#20 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#20 &c->lock irq_context: 0 kn->active#20 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#20 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#20 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock &of->mutex kn->active#20 dev_base_lock irq_context: 0 kn->active#21 fs_reclaim irq_context: 0 kn->active#21 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#21 pool_lock#2 irq_context: 0 kn->active#21 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#21 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#21 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &of->mutex irq_context: 0 &of->mutex kn->active#21 &dev->power.lock irq_context: 0 &of->mutex kn->active#21 pci_lock irq_context: 0 &of->mutex kn->active#21 pci_lock pci_config_lock irq_context: 0 kn->active#22 fs_reclaim irq_context: 0 kn->active#22 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#22 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#22 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#22 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#23 fs_reclaim irq_context: 0 kn->active#23 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#23 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#23 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#23 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#4 rcu_read_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#4 &obj_hash[i].lock irq_context: 0 kn->active#24 fs_reclaim irq_context: 0 kn->active#24 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#24 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#24 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#24 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#25 fs_reclaim irq_context: 0 kn->active#25 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#25 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#25 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#25 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#26 fs_reclaim irq_context: 0 kn->active#26 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#26 &kernfs_locks->open_file_mutex[count] irq_context: 0 
kn->active#26 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#26 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#27 fs_reclaim irq_context: 0 kn->active#27 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#27 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#27 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#27 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#28 fs_reclaim irq_context: 0 kn->active#28 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#28 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#28 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#28 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#28 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#28 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 kn->active#29 fs_reclaim irq_context: 0 kn->active#29 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#29 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#29 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#29 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#30 fs_reclaim irq_context: 0 kn->active#30 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#30 &c->lock irq_context: 0 kn->active#30 &n->list_lock irq_context: 0 kn->active#30 &n->list_lock &c->lock irq_context: 0 kn->active#30 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#30 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#30 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#31 fs_reclaim irq_context: 0 kn->active#31 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#31 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#31 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#31 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#32 fs_reclaim irq_context: 0 kn->active#32 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#32 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#32 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#32 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#33 fs_reclaim irq_context: 0 kn->active#33 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#33 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#33 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#33 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#33 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#33 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &n->list_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &n->list_lock &c->lock irq_context: 0 &ep->mtx &pipe->rd_wait irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#27 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#27 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 kn->active#25 &c->lock irq_context: 0 kn->active#27 &c->lock irq_context: 0 kn->active#29 &c->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu quarantine_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu &c->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu &n->list_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &n->list_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &n->list_lock &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &n->list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &n->list_lock &c->lock irq_context: 0 kn->active#24 &c->lock irq_context: 0 kn->active#24 &n->list_lock irq_context: 0 kn->active#24 &n->list_lock &c->lock irq_context: 0 kn->active#28 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex remove_cache_srcu &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 kn->active#25 &____s->seqcount irq_context: 0 sb_writers#5 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 remove_cache_srcu irq_context: 0 sb_writers#8 &of->mutex kn->active#5 remove_cache_srcu quarantine_lock irq_context: 0 kn->active#26 &c->lock irq_context: 0 kn->active#26 &n->list_lock irq_context: 0 kn->active#26 &n->list_lock &c->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &n->list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &n->list_lock &c->lock irq_context: 0 &type->i_mutex_dir_key#2 &sem->wait_lock irq_context: 0 &type->i_mutex_dir_key#2 &rq->__lock irq_context: 0 &type->i_mutex_dir_key#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &sem->wait_lock irq_context: 0 sb_writers &p->pi_lock irq_context: 0 sb_writers &p->pi_lock &rq->__lock irq_context: 0 sb_writers &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->lock &of->mutex kn->active#5 udc_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#13 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#11 &____s->seqcount irq_context: 0 kn->active#6 &c->lock irq_context: 0 kn->active#6 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 kn->active#14 &c->lock irq_context: 0 kn->active#14 &n->list_lock irq_context: 0 kn->active#14 &n->list_lock &c->lock irq_context: 0 &p->lock &of->mutex kn->active#14 &n->list_lock irq_context: 0 &p->lock &of->mutex kn->active#14 &n->list_lock &c->lock irq_context: 0 kn->active#10 &c->lock irq_context: 0 &mousedev->client_lock irq_context: 0 &mousedev->mutex#2 irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 irq_context: 0 &type->i_mutex_dir_key#5 remove_cache_srcu &c->lock 
irq_context: 0 &type->i_mutex_dir_key#5 remove_cache_srcu &n->list_lock irq_context: 0 &type->i_mutex_dir_key#5 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#5 remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 &type->i_mutex_dir_key#5 remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#5 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock &pcp->lock &zone->lock irq_context: 0 &p->lock &of->mutex kn->active#5 &pcp->lock &zone->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &fsnotify_mark_srcu irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &fsnotify_mark_srcu fs_reclaim irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &fsnotify_mark_srcu fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &fsnotify_mark_srcu pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &fsnotify_mark_srcu &group->notification_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &fsnotify_mark_srcu &group->notification_waitq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &pcp->lock &zone->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 kn->active#8 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 &p->lock &of->mutex kn->active#5 fw_lock irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_node_0 irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex &rq->__lock irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#9 &rq->__lock irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 &obj_hash[i].lock irq_context: 0 &p->lock rcu_node_0 irq_context: 0 &p->lock &rcu_state.expedited_wq irq_context: 0 &p->lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &p->lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &p->lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] rcu_read_lock rcu_node_0 irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rcu_state.expedited_wq irq_context: 0 kn->active#5 
&kernfs_locks->open_file_mutex[count] rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu &rq->__lock irq_context: 0 kn->active#10 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#10 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &n->list_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &n->list_lock &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sb_writers#8 kn->active#5 fs_reclaim irq_context: 0 sb_writers#8 kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &hctx->lock irq_context: 0 rcu_read_lock &hctx->lock irq_context: 0 rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 kn->active#12 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#12 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_node_0 irq_context: 0 &kernfs_locks->open_file_mutex[count] &rcu_state.expedited_wq irq_context: 0 &kernfs_locks->open_file_mutex[count] &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &kernfs_locks->open_file_mutex[count] &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &kernfs_locks->open_file_mutex[count] &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &kernfs_locks->open_file_mutex[count] &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->rd_wait &ep->lock irq_context: 0 &pipe->rd_wait &ep->lock &ep->wq irq_context: 0 &pipe->rd_wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 &pipe->rd_wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &pipe->rd_wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &ep->lock irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &ep->lock &ep->wq irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &ep->lock &ep->wq &p->pi_lock 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx rcu_read_lock &pipe->rd_wait irq_context: 0 &ep->mtx &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss &n->list_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss &n->list_lock &c->lock irq_context: 0 &sighand->signalfd_wqh irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &p->pi_lock irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &p->pi_lock &rq->__lock irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock irq_context: 0 &sb->s_type->i_mutex_key#14 irq_context: 0 mapping.invalidate_lock#2 irq_context: 0 mapping.invalidate_lock#2 mmu_notifier_invalidate_range_start irq_context: 0 mapping.invalidate_lock#2 &____s->seqcount irq_context: 0 mapping.invalidate_lock#2 pool_lock#2 irq_context: 0 mapping.invalidate_lock#2 &xa->xa_lock#7 irq_context: 0 mapping.invalidate_lock#2 &xa->xa_lock#7 pool_lock#2 irq_context: 0 mapping.invalidate_lock#2 lock#4 irq_context: 0 mapping.invalidate_lock#2 tk_core.seq.seqcount irq_context: 0 mapping.invalidate_lock#2 &dd->lock irq_context: 0 mapping.invalidate_lock#2 rcu_read_lock &dd->lock irq_context: 0 mapping.invalidate_lock#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 mapping.invalidate_lock#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 mapping.invalidate_lock#2 rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 mapping.invalidate_lock#2 lock#4 &lruvec->lru_lock irq_context: 0 mapping.invalidate_lock#2 rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 mapping.invalidate_lock#2 &c->lock irq_context: 0 kn->active#24 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#24 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 kn->active#27 &n->list_lock irq_context: 0 kn->active#27 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 &p->lock fs_reclaim &rq->__lock irq_context: 0 mapping.invalidate_lock#2 rcu_read_lock pool_lock#2 irq_context: 0 mapping.invalidate_lock#2 rcu_read_lock &____s->seqcount irq_context: 0 mapping.invalidate_lock#2 rcu_read_lock &c->lock irq_context: 0 mapping.invalidate_lock#2 &n->list_lock irq_context: 0 mapping.invalidate_lock#2 &n->list_lock &c->lock irq_context: 0 mapping.invalidate_lock#2 &rq->__lock irq_context: 0 kn->active#29 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#29 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 &eli->li_list_mtx &obj_hash[i].lock irq_context: 0 &eli->li_list_mtx pool_lock#2 irq_context: 0 ext4_li_mtx irq_context: 0 ext4_li_mtx &eli->li_list_mtx 
irq_context: 0 ext4_li_mtx &obj_hash[i].lock irq_context: 0 ext4_li_mtx pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 batched_entropy_u32.lock crngs.lock irq_context: 0 &p->lock &of->mutex kn->active#5 &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 remove_cache_srcu &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#5 remove_cache_srcu &obj_hash[i].lock irq_context: 0 kn->active#26 &____s->seqcount irq_context: 0 kn->active#29 &n->list_lock irq_context: 0 kn->active#29 &n->list_lock &c->lock irq_context: 0 kn->active#29 remove_cache_srcu irq_context: 0 kn->active#29 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &rfkill->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss quarantine_lock irq_context: 0 kn->active#25 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#25 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss quarantine_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss remove_cache_srcu irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss remove_cache_srcu quarantine_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss remove_cache_srcu &c->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss remove_cache_srcu &n->list_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sem->wait_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#5 batched_entropy_u8.lock irq_context: 0 kn->active#5 kfence_freelist_lock irq_context: 0 &mm->mmap_lock &lruvec->lru_lock irq_context: 0 sb_writers &rq->__lock irq_context: 0 kn->active#25 &n->list_lock irq_context: 0 kn->active#5 pool_lock#2 irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &n->list_lock &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 tk_core.seq.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rename_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount 
&dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 &____s->seqcount#4 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 &____s->seqcount#4 &____s->seqcount#4/1 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount &dentry->d_lock/2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount &dentry->d_lock/2 &dentry->d_lock/3 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_lock_key &xa->xa_lock#7 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 lock#4 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 lock#5 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &lruvec->lru_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &info->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &xa->xa_lock#7 irq_context: 0 kn->active#26 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#30 &____s->seqcount irq_context: 0 kn->active#30 remove_cache_srcu irq_context: 0 kn->active#30 remove_cache_srcu quarantine_lock irq_context: 0 kn->active#30 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#29 &kernfs_locks->open_file_mutex[count] &n->list_lock irq_context: 0 kn->active#29 &kernfs_locks->open_file_mutex[count] &n->list_lock &c->lock irq_context: 0 kn->active#28 remove_cache_srcu irq_context: 0 kn->active#28 remove_cache_srcu quarantine_lock irq_context: 0 kn->active#34 fs_reclaim irq_context: 0 kn->active#34 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#34 &c->lock irq_context: 0 kn->active#34 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#34 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#34 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#26 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 kn->active#28 &____s->seqcount irq_context: 0 kn->active#29 &kernfs_locks->open_file_mutex[count] remove_cache_srcu irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu irq_context: 0 kn->active#29 &kernfs_locks->open_file_mutex[count] remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu quarantine_lock irq_context: 0 kn->active#29 &kernfs_locks->open_file_mutex[count] remove_cache_srcu &c->lock irq_context: 0 kn->active#29 &kernfs_locks->open_file_mutex[count] remove_cache_srcu &n->list_lock irq_context: 0 kn->active#29 &kernfs_locks->open_file_mutex[count] remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 kn->active#29 
&kernfs_locks->open_file_mutex[count] remove_cache_srcu &obj_hash[i].lock irq_context: 0 mapping.invalidate_lock#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#5 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 mapping.invalidate_lock#2 &xa->xa_lock#7 &c->lock irq_context: 0 kn->active#27 &kernfs_locks->open_file_mutex[count] remove_cache_srcu irq_context: 0 kn->active#27 &kernfs_locks->open_file_mutex[count] remove_cache_srcu quarantine_lock irq_context: 0 kn->active#30 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#2 &n->list_lock irq_context: 0 &type->i_mutex_dir_key#2 &n->list_lock &c->lock irq_context: 0 kn->active#25 &n->list_lock &c->lock irq_context: 0 tomoyo_ss &base->lock irq_context: 0 tomoyo_ss &base->lock &obj_hash[i].lock irq_context: 0 kn->active#35 fs_reclaim irq_context: 0 kn->active#35 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#35 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#35 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#35 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#36 fs_reclaim irq_context: 0 kn->active#36 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#36 &c->lock irq_context: 0 kn->active#36 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#36 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#36 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#37 fs_reclaim irq_context: 0 kn->active#37 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#37 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#37 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#37 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#37 &c->lock irq_context: 0 kn->active#37 &n->list_lock irq_context: 0 kn->active#37 &n->list_lock &c->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex &lock->wait_lock irq_context: 0 &sig->cred_guard_mutex &lock->wait_lock irq_context: 0 &sig->cred_guard_mutex &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#38 fs_reclaim irq_context: 0 kn->active#38 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#38 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#38 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#38 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock &of->mutex kn->active#38 i2c_dev_list_lock irq_context: 0 &sig->cred_guard_mutex &n->list_lock &c->lock irq_context: 0 tomoyo_ss tomoyo_policy_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &sem->wait_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &sem->wait_lock 
irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &p->pi_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &p->pi_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex key#5 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock tomoyo_policy_lock.wait_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->lock &of->mutex kn->active#5 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim &rq->__lock irq_context: 0 tomoyo_ss tomoyo_policy_lock tomoyo_policy_lock.wait_lock irq_context: 0 tomoyo_ss tomoyo_policy_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &n->list_lock irq_context: 0 tomoyo_ss tomoyo_policy_lock.wait_lock irq_context: 0 tomoyo_ss &p->pi_lock irq_context: 0 tomoyo_ss &p->pi_lock &rq->__lock irq_context: 0 tomoyo_ss &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock.wait_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &p->pi_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock remove_cache_srcu irq_context: 0 &mm->mmap_lock remove_cache_srcu quarantine_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &c->lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &n->list_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &sem->wait_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &vma->vm_lock->lock rcu_read_lock rcu_node_0 irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rq->__lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override &c->lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override pool_lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem 
fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock &sem->wait_lock irq_context: 0 &mm->mmap_lock &p->pi_lock irq_context: 0 &mm->mmap_lock &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 videodev_lock irq_context: 0 &dev_instance->mutex irq_context: 0 &dev_instance->mutex fs_reclaim irq_context: 0 &dev_instance->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev_instance->mutex pool_lock#2 irq_context: 0 &dev_instance->mutex vicodec_core:1844:(hdl)->_lock irq_context: 0 &dev_instance->mutex &c->lock irq_context: 0 &dev_instance->mutex &vdev->fh_lock irq_context: 0 &mdev->req_queue_mutex irq_context: 0 &mdev->req_queue_mutex &dev_instance->mutex irq_context: 0 &mdev->req_queue_mutex &dev_instance->mutex &m2m_dev->job_spinlock irq_context: 0 &mdev->req_queue_mutex &dev_instance->mutex &q->done_wq irq_context: 0 &mdev->req_queue_mutex &dev_instance->mutex &q->mmap_lock irq_context: 0 &mdev->req_queue_mutex &dev_instance->mutex &obj_hash[i].lock irq_context: 0 &mdev->req_queue_mutex &dev_instance->mutex pool_lock#2 irq_context: 0 &mdev->req_queue_mutex &vdev->fh_lock irq_context: 0 &mdev->req_queue_mutex &mdev->graph_mutex irq_context: 0 &mdev->req_queue_mutex vicodec_core:1844:(hdl)->_lock irq_context: 0 &mdev->req_queue_mutex vicodec_core:1844:(hdl)->_lock &obj_hash[i].lock irq_context: 0 &mdev->req_queue_mutex vicodec_core:1844:(hdl)->_lock pool_lock#2 irq_context: 0 &mdev->req_queue_mutex &obj_hash[i].lock irq_context: 0 &mdev->req_queue_mutex pool_lock#2 irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &n->list_lock irq_context: 0 &mm->mmap_lock &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock &folio_wait_table[i] &p->pi_lock irq_context: 0 &mm->mmap_lock &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 fh->state->lock irq_context: 0 &vdev->fh_lock irq_context: 0 &dev->dev_mutex irq_context: 0 &dev->dev_mutex fs_reclaim irq_context: 0 &dev->dev_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->dev_mutex pool_lock#2 irq_context: 0 &dev->dev_mutex vim2m:1183:(hdl)->_lock irq_context: 0 &dev->dev_mutex &obj_hash[i].lock irq_context: 0 &dev->dev_mutex &vdev->fh_lock irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 kn->active#39 fs_reclaim irq_context: 0 kn->active#39 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#39 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#39 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#39 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &cfs_rq->removed.lock 
irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mdev->req_queue_mutex vim2m:1183:(hdl)->_lock irq_context: 0 &mdev->req_queue_mutex vim2m:1183:(hdl)->_lock &obj_hash[i].lock irq_context: 0 &mdev->req_queue_mutex vim2m:1183:(hdl)->_lock pool_lock#2 irq_context: 0 &mdev->req_queue_mutex &dev->dev_mutex irq_context: 0 &mdev->req_queue_mutex &dev->dev_mutex &m2m_dev->job_spinlock irq_context: 0 &mdev->req_queue_mutex &dev->dev_mutex &q->done_wq irq_context: 0 &mdev->req_queue_mutex &dev->dev_mutex &q->mmap_lock irq_context: 0 &mdev->req_queue_mutex &dev->dev_mutex &obj_hash[i].lock irq_context: 0 &mdev->req_queue_mutex &dev->dev_mutex pool_lock#2 irq_context: 0 kn->active#39 &c->lock irq_context: 0 kn->active#39 &____s->seqcount irq_context: 0 kn->active#39 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#39 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem &sem->wait_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &cfs_rq->removed.lock irq_context: 0 &pipe->mutex/1 &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &sem->wait_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock lock#4 &lruvec->lru_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock quarantine_lock irq_context: 0 rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss remove_cache_srcu irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss remove_cache_srcu quarantine_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss remove_cache_srcu &c->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss remove_cache_srcu &n->list_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss remove_cache_srcu &obj_hash[i].lock irq_context: softirq &(&wb->dwork)->timer irq_context: softirq &(&wb->dwork)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&wb->dwork)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&wb->dwork)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&wb->dwork)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&wb->dwork)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback irq_context: 0 (wq_completion)writeback 
(work_completion)(&(&wb->dwork)->work) irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->work_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->list_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &p->sequence irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) key#10 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->work_lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->work_lock &base->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->work_lock &base->lock &obj_hash[i].lock irq_context: 0 kn->active#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss quarantine_lock irq_context: 0 kn->active#37 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#37 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 kn->active#37 &____s->seqcount irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu quarantine_lock irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu &c->lock irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu &n->list_lock irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock batched_entropy_u64.lock crngs.lock base_crng.lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &mapping->i_mmap_rwsem &sem->wait_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &mapping->i_mmap_rwsem &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &mapping->i_mmap_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &vcapture->lock irq_context: 0 &mdev->graph_mutex irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock batched_entropy_u8.lock crngs.lock irq_context: 0 &pipe->rd_wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mapping->i_mmap_rwsem &sem->wait_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mapping->i_mmap_rwsem &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mapping->i_mmap_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex &lock->wait_lock irq_context: 0 &mm->mmap_lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock 
&mapping->i_mmap_rwsem &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 &iint->mutex &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &of->mutex &rq->__lock irq_context: 0 sb_writers#8 &of->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex#3 irq_context: 0 &mdev->req_queue_mutex &dev->mutex#3 irq_context: 0 &mdev->req_queue_mutex &dev->mutex#3 &vdev->fh_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem quarantine_lock irq_context: 0 &pipe->rd_wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &sem->wait_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: softirq (&q->timeout) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 tasklist_lock &sighand->siglock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock &folio_wait_table[i] &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock rcu_node_0 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rq->__lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback quarantine_lock irq_context: 0 &u->bindlock irq_context: 0 &u->bindlock fs_reclaim irq_context: 0 &u->bindlock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &u->bindlock pool_lock#2 irq_context: 0 &u->bindlock batched_entropy_u32.lock irq_context: 0 &u->bindlock &net->unx.table.locks[i] irq_context: 0 &u->bindlock &net->unx.table.locks[i] &net->unx.table.locks[i]/1 irq_context: 0 &u->bindlock &net->unx.table.locks[i]/1 irq_context: 0 &u->lock &u->lock/1 &sk->sk_peer_lock irq_context: 0 &u->lock &u->lock/1 &dentry->d_lock irq_context: 0 &u->lock &u->lock/1 &sk->sk_peer_lock &sk->sk_peer_lock/1 irq_context: 0 &u->lock &u->lock/1 &sk->sk_peer_lock/1 irq_context: 0 &u->iolock rcu_read_lock &ei->socket.wq.wait irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &ei->socket.wq.wait irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &group->mark_mutex &sb->s_type->i_lock_key 
&dentry->d_lock &dentry->d_lock/1 irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &mapping->i_mmap_rwsem &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &mapping->i_mmap_rwsem &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &mapping->i_mmap_rwsem pool_lock#2 irq_context: 0 &mm->mmap_lock &vma->vm_lock->lock &rq->__lock irq_context: 0 &mm->mmap_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&journal->j_commit_timer) irq_context: softirq (&journal->j_commit_timer) &p->pi_lock irq_context: softirq (&journal->j_commit_timer) &p->pi_lock &rq->__lock irq_context: softirq (&journal->j_commit_timer) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_checkpoint_mutex irq_context: 0 &journal->j_checkpoint_mutex mmu_notifier_invalidate_range_start irq_context: 0 &journal->j_checkpoint_mutex pool_lock#2 irq_context: 0 &journal->j_checkpoint_mutex tk_core.seq.seqcount irq_context: 0 &journal->j_checkpoint_mutex &dd->lock irq_context: 0 &journal->j_checkpoint_mutex &obj_hash[i].lock irq_context: 0 &journal->j_checkpoint_mutex &base->lock irq_context: 0 &journal->j_checkpoint_mutex &base->lock &obj_hash[i].lock irq_context: 0 &journal->j_checkpoint_mutex rcu_read_lock &pool->lock irq_context: 0 &journal->j_checkpoint_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &journal->j_checkpoint_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &journal->j_checkpoint_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &journal->j_checkpoint_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &journal->j_checkpoint_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_checkpoint_mutex bit_wait_table + i irq_context: 0 &journal->j_checkpoint_mutex &rq->__lock irq_context: 0 &journal->j_checkpoint_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_checkpoint_mutex &journal->j_state_lock irq_context: 0 &journal->j_state_lock &journal->j_wait_updates irq_context: 0 &journal->j_list_lock irq_context: 0 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 &ei->i_es_lock irq_context: 0 lock#4 irq_context: 0 &mapping->private_lock irq_context: 0 &ret->b_state_lock irq_context: 0 &ret->b_state_lock &journal->j_list_lock irq_context: 0 &ei->i_es_lock key#2 irq_context: 0 &dd->lock irq_context: 0 &journal->j_state_lock irq_context: 0 &journal->j_state_lock &journal->j_list_lock irq_context: 0 rcu_read_lock &dd->lock irq_context: 0 &ret->b_state_lock &journal->j_list_lock rcu_read_lock &memcg->move_lock irq_context: 0 &ret->b_state_lock &journal->j_list_lock rcu_read_lock &xa->xa_lock#7 irq_context: 0 &ret->b_state_lock &journal->j_list_lock &sb->s_type->i_lock_key#3 irq_context: 0 &ret->b_state_lock &journal->j_list_lock &wb->list_lock irq_context: 0 &ret->b_state_lock &journal->j_list_lock &wb->list_lock &sb->s_type->i_lock_key#3 irq_context: 0 &ret->b_state_lock &journal->j_list_lock rcu_read_lock &xa->xa_lock#7 key#10 irq_context: 0 &journal->j_state_lock &journal->j_list_lock irq_context: 0 &sbi->s_md_lock irq_context: 0 &journal->j_fc_wait irq_context: 0 &journal->j_history_lock irq_context: 0 tasklist_lock &sighand->siglock 
batched_entropy_u8.lock irq_context: 0 tasklist_lock &sighand->siglock kfence_freelist_lock irq_context: 0 &sighand->siglock &meta->lock irq_context: 0 &sighand->siglock kfence_freelist_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock ptlock_ptr(page)#2 &pcp->lock &zone->lock irq_context: 0 rcu_read_lock &vma->vm_lock->lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &mapping->i_mmap_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 kn->active#37 &kernfs_locks->open_file_mutex[count] &n->list_lock irq_context: 0 kn->active#37 &kernfs_locks->open_file_mutex[count] &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &folio_wait_table[i] &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem quarantine_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock pool_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim &obj_hash[i].lock irq_context: hardirq &x->wait#5 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tomoyo_ss rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 batched_entropy_u8.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 kfence_freelist_lock irq_context: 0 tomoyo_ss rcu_read_lock &rcu_state.gp_wq irq_context: 0 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem &meta->lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem kfence_freelist_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 quarantine_lock irq_context: 0 &mm->mmap_lock 
remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page) irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &pcp->lock &zone->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &mm->mmap_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 tomoyo_ss &cfs_rq->removed.lock irq_context: 0 kn->active#37 remove_cache_srcu irq_context: 0 kn->active#37 remove_cache_srcu quarantine_lock irq_context: 0 kn->active#37 remove_cache_srcu &c->lock irq_context: 0 kn->active#37 remove_cache_srcu &n->list_lock irq_context: 0 kn->active#37 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &meta->lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock kfence_freelist_lock irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rcu_state.gp_wq irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rq->__lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: hardirq hrtimer_bases.lock fill_pool_map-wait-type-override &n->list_lock irq_context: hardirq hrtimer_bases.lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &tsk->futex_exit_mutex &mm->mmap_lock &rq->__lock irq_context: 0 remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#5 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &obj_hash[i].lock pool_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 &dentry->d_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 tk_core.seq.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 rename_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 
&sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 rename_lock rename_lock.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 &____s->seqcount#4 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 &____s->seqcount#4 &____s->seqcount#4/1 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 rename_lock rename_lock.seqcount &dentry->d_lock/2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 rename_lock rename_lock.seqcount &dentry->d_lock/2 &dentry->d_lock/3 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 krc.lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &xa->xa_lock#7 irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4/4 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mapping->i_mmap_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &obj_hash[i].lock pool_lock irq_context: 0 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &meta->lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->lock remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 &p->lock remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &p->lock remove_cache_srcu &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem fill_pool_map-wait-type-override pool_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &ep->mtx remove_cache_srcu irq_context: 0 &ep->mtx remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tomoyo_ss rcu_read_lock &cfs_rq->removed.lock irq_context: 0 tomoyo_ss rcu_read_lock &obj_hash[i].lock irq_context: 0 tomoyo_ss rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &obj_hash[i].lock pool_lock irq_context: 0 
&vma->vm_lock->lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &obj_hash[i].lock irq_context: 0 &ep->mtx key#11 irq_context: 0 &ep->mtx &pcp->lock &zone->lock irq_context: 0 &ep->mtx &pcp->lock &zone->lock &____s->seqcount irq_context: 0 kn->active#5 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: softirq drivers/base/dd.c:321 irq_context: softirq drivers/base/dd.c:321 rcu_read_lock &pool->lock irq_context: softirq drivers/base/dd.c:321 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq drivers/base/dd.c:321 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq drivers/base/dd.c:321 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq drivers/base/dd.c:321 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work device_links_lock irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work device_links_lock &k->list_lock irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work device_links_lock &k->k_lock irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work deferred_probe_mutex irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work deferred_probe_work irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &base->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#5 &mm->mmap_lock fs_reclaim irq_context: 0 &type->i_mutex_dir_key#5 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#5 &mm->mmap_lock &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#5 &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &type->i_mutex_dir_key#5 &mm->mmap_lock ptlock_ptr(page)#2 lock#4 irq_context: 0 rcu_read_lock pgd_lock irq_context: 0 rcu_read_lock key irq_context: 0 rcu_read_lock pcpu_lock irq_context: 0 rcu_read_lock percpu_counters_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->list_lock 
&p->sequence irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->list_lock key#10 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->list_lock &sb->s_type->i_lock_key#3 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->list_lock &type->s_umount_key#32 &sb->s_type->i_lock_key#22 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sb->s_type->i_lock_key#22 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &wb->list_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->list_lock &type->s_umount_key#40 &sb->s_type->i_lock_key#3 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &sb->s_type->i_lock_key#3 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &memcg->move_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#7 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#7 &s->s_inode_wblist_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 tk_core.seq.seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &dd->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &base->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 lock#4 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 lock#5 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#7 key#10 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock key#10 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &wb->list_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &wb->list_lock &sb->s_type->i_lock_key#3 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &dd->lock irq_context: 0 (wq_completion)writeback 
(work_completion)(&(&wb->dwork)->work) rcu_read_lock &dd->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) rcu_read_lock &obj_hash[i].lock pool_lock irq_context: softirq rcu_read_lock &memcg->move_lock irq_context: softirq rcu_read_lock &xa->xa_lock#7 irq_context: softirq rcu_read_lock &xa->xa_lock#7 &obj_hash[i].lock irq_context: softirq rcu_read_lock &xa->xa_lock#7 &base->lock irq_context: softirq rcu_read_lock &xa->xa_lock#7 &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &xa->xa_lock#7 key#10 irq_context: softirq rcu_read_lock &xa->xa_lock#7 key#12 irq_context: softirq rcu_read_lock &xa->xa_lock#7 &wb->work_lock irq_context: softirq rcu_read_lock &xa->xa_lock#7 &wb->work_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &xa->xa_lock#7 &wb->work_lock &base->lock irq_context: softirq rcu_read_lock &xa->xa_lock#7 &wb->work_lock &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &xa->xa_lock#7 &s->s_inode_wblist_lock irq_context: 0 kn->active#39 remove_cache_srcu irq_context: 0 kn->active#39 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss &rq->__lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 remove_cache_srcu irq_context: 0 sb_writers &type->i_mutex_dir_key/1 remove_cache_srcu quarantine_lock irq_context: softirq &(&wb->bw_dwork)->timer irq_context: softirq &(&wb->bw_dwork)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&wb->bw_dwork)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&wb->bw_dwork)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&wb->bw_dwork)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&wb->bw_dwork)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->bw_dwork)->work) irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->bw_dwork)->work) &wb->list_lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 batched_entropy_u8.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 kfence_freelist_lock irq_context: 0 kn->active#39 remove_cache_srcu &c->lock irq_context: 0 kn->active#39 remove_cache_srcu &n->list_lock irq_context: 0 kn->active#39 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss &rq->__lock irq_context: 0 tomoyo_ss pgd_lock irq_context: 0 tomoyo_ss key irq_context: 0 tomoyo_ss pcpu_lock irq_context: 0 tomoyo_ss percpu_counters_lock irq_context: 0 &type->i_mutex_dir_key#5 sb_writers#5 &rq->__lock irq_context: 0 &type->i_mutex_dir_key#5 sb_writers#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock quarantine_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock 
&rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &obj_hash[i].lock pool_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &obj_hash[i].lock irq_context: hardirq|softirq &x->wait#12 &p->pi_lock &rq->__lock irq_context: hardirq|softirq &x->wait#12 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &n->list_lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &n->list_lock &c->lock irq_context: 0 &p->lock &of->mutex kn->active#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu &obj_hash[i].lock irq_context: 0 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 krc.lock &obj_hash[i].lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 krc.lock &base->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 krc.lock &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu &rq->__lock irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock pgd_lock irq_context: 0 &mm->mmap_lock key irq_context: 0 &mm->mmap_lock pcpu_lock irq_context: 0 &mm->mmap_lock percpu_counters_lock irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &p->pi_lock &cfs_rq->removed.lock irq_context: 0 kn->active#39 &kernfs_locks->open_file_mutex[count] &n->list_lock irq_context: 0 kn->active#39 &kernfs_locks->open_file_mutex[count] &n->list_lock &c->lock irq_context: 0 kn->active#40 fs_reclaim irq_context: 0 kn->active#40 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#40 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#40 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#40 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#39 &n->list_lock irq_context: 0 kn->active#39 &n->list_lock &c->lock 
irq_context: 0 kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &vma->vm_lock->lock &cfs_rq->removed.lock irq_context: 0 &vma->vm_lock->lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 kn->active#41 fs_reclaim irq_context: 0 kn->active#41 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#41 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#41 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#41 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &root->kernfs_rwsem &cfs_rq->removed.lock irq_context: 0 &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 kn->active#42 fs_reclaim irq_context: 0 kn->active#42 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#42 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#42 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#42 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#42 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#42 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 kn->active#43 fs_reclaim irq_context: 0 kn->active#43 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#43 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#43 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#43 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#40 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#40 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &p->lock &of->mutex kn->active#5 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss &base->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &lo->lo_mutex irq_context: 0 &disk->open_mutex &lo->lo_mutex irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &disk->open_mutex nbd_index_mutex irq_context: 0 &disk->open_mutex nbd_index_mutex &nbd->config_lock irq_context: 0 &disk->open_mutex nbd_index_mutex &nbd->config_lock mmu_notifier_invalidate_range_start irq_context: 0 &disk->open_mutex nbd_index_mutex &nbd->config_lock pool_lock#2 irq_context: 0 &disk->open_mutex &nbd->config_lock irq_context: 0 &disk->open_mutex &nbd->config_lock &bdev->bd_size_lock irq_context: 0 &disk->open_mutex &nbd->config_lock &q->queue_lock irq_context: 0 &disk->open_mutex &nbd->config_lock &ACCESS_PRIVATE(sdp, lock) irq_context: 0 &disk->open_mutex &nbd->config_lock set->srcu irq_context: 0 &disk->open_mutex &nbd->config_lock &obj_hash[i].lock irq_context: 0 &disk->open_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) &ACCESS_PRIVATE(sdp, lock) irq_context: 0 &disk->open_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock 
&pool->lock irq_context: 0 &disk->open_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &disk->open_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &disk->open_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &disk->open_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &disk->open_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &disk->open_mutex &nbd->config_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &ACCESS_PRIVATE(ssp->srcu_sup, lock) irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ACCESS_PRIVATE(ssp->srcu_sup, lock) irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &rq->__lock irq_context: 0 &disk->open_mutex &nbd->config_lock &x->wait#3 irq_context: 0 &disk->open_mutex &nbd->config_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &ssp->srcu_sup->srcu_cb_mutex &ACCESS_PRIVATE(ssp->srcu_sup, lock) irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) &rq->__lock irq_context: 0 &disk->open_mutex &nbd->config_lock set->srcu irq_context: 0 &disk->open_mutex &nbd->config_lock pool_lock#2 irq_context: 0 &disk->open_mutex nbd_index_mutex &nbd->config_lock &c->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &rq->__lock &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#5 rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#5 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->lock &of->mutex kn->active#5 &rfkill->lock irq_context: 0 &mousedev->mutex/1 irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_node_0 irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &mousedev->mutex/1 
&mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex &rq->__lock irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#5 remove_cache_srcu rcu_node_0 irq_context: 0 kn->active#5 remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 kn->active#5 remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock irq_context: 0 kn->active#5 remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 kn->active#5 remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 &obj_hash[i].lock irq_context: 0 kn->active#15 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#19 &c->lock irq_context: 0 kn->active#19 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 tomoyo_ss remove_cache_srcu rcu_node_0 irq_context: 0 tomoyo_ss remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 tomoyo_ss remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock irq_context: 0 tomoyo_ss remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 tomoyo_ss remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tomoyo_ss remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->lock &of->mutex kn->active#5 &base->lock irq_context: 0 &p->lock &of->mutex kn->active#5 &base->lock &obj_hash[i].lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &rq->__lock irq_context: 0 kn->active#44 fs_reclaim irq_context: 0 kn->active#44 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#44 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#44 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#44 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#44 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#44 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 &disk->open_mutex &new->lock irq_context: 0 kn->active#16 &c->lock irq_context: 0 &disk->open_mutex &new->lock &mtdblk->cache_mutex irq_context: 0 kn->active#18 &c->lock irq_context: 0 kn->active#16 &n->list_lock irq_context: 0 kn->active#16 &n->list_lock &c->lock irq_context: 0 kn->active#17 &c->lock irq_context: 0 lock pidmap_lock &c->lock irq_context: 0 kn->active#19 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 kn->active#15 &c->lock irq_context: 0 kn->active#17 &n->list_lock irq_context: 0 kn->active#17 &n->list_lock &c->lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &c->lock irq_context: 0 (wq_completion)kblockd (work_completion)(&q->timeout_work) &tags->lock irq_context: 0 (wq_completion)kblockd (work_completion)(&q->timeout_work) &obj_hash[i].lock irq_context: 0 (wq_completion)kblockd (work_completion)(&q->timeout_work) &base->lock irq_context: 0 (wq_completion)kblockd (work_completion)(&q->timeout_work) &base->lock 
&obj_hash[i].lock irq_context: 0 &p->lock &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 kn->active#17 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#17 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &journal->j_state_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &journal->j_state_lock &base->lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &rq->__lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &wb->work_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &wb->work_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &wb->work_lock &base->lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &wb->work_lock &base->lock &obj_hash[i].lock irq_context: 0 kn->active#15 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock rcu_read_lock &base->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 kn->active#15 &n->list_lock irq_context: 0 kn->active#15 &n->list_lock &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 remove_cache_srcu irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 remove_cache_srcu quarantine_lock irq_context: 0 kn->active#45 fs_reclaim irq_context: 0 kn->active#45 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#45 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#45 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#45 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &mtd->master.chrdev_lock irq_context: 0 &mtd->master.chrdev_lock &mm->mmap_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 remove_cache_srcu &c->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 remove_cache_srcu &obj_hash[i].lock irq_context: softirq (&dom->period_timer) irq_context: softirq (&dom->period_timer) key#13 irq_context: softirq (&dom->period_timer) &p->sequence irq_context: softirq (&dom->period_timer) &obj_hash[i].lock irq_context: softirq (&dom->period_timer) &base->lock irq_context: softirq (&dom->period_timer) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &rq->__lock &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#2 remove_cache_srcu irq_context: 0 &type->i_mutex_dir_key#2 remove_cache_srcu quarantine_lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &pcp->lock &zone->lock 
irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &pcp->lock &zone->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] remove_cache_srcu &c->lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] remove_cache_srcu &n->list_lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12/4 &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback &rq->__lock irq_context: 0 &type->i_mutex_dir_key#5 &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &base->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &base->lock &obj_hash[i].lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 remove_cache_srcu &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#2 rcu_read_lock rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#2 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 remove_cache_srcu rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#5 rcu_read_lock rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#5 rcu_node_0 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#2 &rq->__lock &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#2 remove_cache_srcu &c->lock irq_context: 0 &type->i_mutex_dir_key#2 remove_cache_srcu &n->list_lock irq_context: 0 &type->i_mutex_dir_key#2 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &fsnotify_mark_srcu fs_reclaim irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &fsnotify_mark_srcu fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &fsnotify_mark_srcu pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &fsnotify_mark_srcu &group->notification_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &fsnotify_mark_srcu &group->notification_waitq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &fsnotify_mark_srcu &group->notification_waitq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &fsnotify_mark_srcu 
&group->notification_waitq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &fsnotify_mark_srcu &group->notification_waitq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &fsnotify_mark_srcu &rq->__lock irq_context: 0 &mark->lock irq_context: 0 &group->inotify_data.idr_lock irq_context: 0 &group->inotify_data.idr_lock &obj_hash[i].lock irq_context: 0 &group->inotify_data.idr_lock pool_lock#2 irq_context: 0 destroy_lock irq_context: 0 fs/notify/mark.c:89 irq_context: 0 (reaper_work).work irq_context: 0 &x->wait#10 irq_context: 0 (wq_completion)events_unbound connector_reaper_work irq_context: 0 (wq_completion)events_unbound connector_reaper_work destroy_lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(sdp, lock) irq_context: 0 (wq_completion)events_unbound connector_reaper_work &fsnotify_mark_srcu irq_context: 0 (wq_completion)events_unbound connector_reaper_work &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(ssp->srcu_sup, lock) &ACCESS_PRIVATE(sdp, lock) irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound connector_reaper_work &x->wait#3 irq_context: 0 (wq_completion)events_unbound connector_reaper_work &rq->__lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (reaper_work).work irq_context: 0 (wq_completion)events_unbound (reaper_work).work destroy_lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &ACCESS_PRIVATE(sdp, lock) irq_context: 0 (wq_completion)events_unbound (reaper_work).work &fsnotify_mark_srcu irq_context: 0 (wq_completion)events_unbound (reaper_work).work &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &x->wait#3 irq_context: 0 (wq_completion)events_unbound (reaper_work).work &rq->__lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ACCESS_PRIVATE(ssp->srcu_sup, lock) &ACCESS_PRIVATE(sdp, lock) irq_context: 0 (wq_completion)events_unbound connector_reaper_work &____s->seqcount irq_context: 0 (wq_completion)events_unbound connector_reaper_work pool_lock#2 irq_context: 0 (wq_completion)events_unbound connector_reaper_work rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound 
(reaper_work).work pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&barr->work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&barr->work) &x->wait#10 irq_context: 0 (wq_completion)events_unbound (work_completion)(&barr->work) &x->wait#10 &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) pool_lock#2 irq_context: 0 &sig->cred_guard_mutex sb_writers#4 batched_entropy_u8.lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 kfence_freelist_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &meta->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock &n->list_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock &n->list_lock &c->lock irq_context: 0 tomoyo_ss tomoyo_policy_lock &n->list_lock irq_context: 0 tomoyo_ss tomoyo_policy_lock &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &base->lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss tomoyo_policy_lock &c->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss batched_entropy_u8.lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss kfence_freelist_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &meta->lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss &c->lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss tomoyo_policy_lock irq_context: 0 &iint->mutex mapping.invalidate_lock &xa->xa_lock#7 &n->list_lock irq_context: 0 &iint->mutex mapping.invalidate_lock &xa->xa_lock#7 &n->list_lock &c->lock irq_context: 0 &iint->mutex sb_writers#4 batched_entropy_u8.lock irq_context: 0 &iint->mutex sb_writers#4 kfence_freelist_lock irq_context: 0 &iint->mutex sb_writers#4 &meta->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &u->bindlock &net->unx.table.locks[i]/1 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 userns_state_mutex irq_context: 0 &ei->xattr_sem &mapping->private_lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &c->lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock lock#4 &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex &n->list_lock irq_context: 0 
&sig->cred_guard_mutex &iint->mutex &n->list_lock &c->lock irq_context: 0 sb_writers#4 jbd2_handle &pcp->lock &zone->lock irq_context: 0 sb_writers#4 jbd2_handle &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &iint->mutex &n->list_lock irq_context: 0 &iint->mutex &n->list_lock &c->lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem quarantine_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 remove_cache_srcu irq_context: 0 &sig->cred_guard_mutex sb_writers#4 remove_cache_srcu quarantine_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 remove_cache_srcu &c->lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 remove_cache_srcu &n->list_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex sb_writers#4 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock lock#4 &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &p->alloc_lock &x->wait#25 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex rlock-AF_NETLINK irq_context: 0 rtnl_mutex (inetaddr_validator_chain).rwsem irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fs_reclaim irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem pcpu_alloc_mutex irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem pcpu_alloc_mutex pcpu_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fib_info_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &c->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &dir->lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &____s->seqcount irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem nl_table_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem nl_table_wait.lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex _xmit_LOOPBACK irq_context: 0 rtnl_mutex netpoll_srcu irq_context: 0 rtnl_mutex &in_dev->mc_tomb_lock irq_context: 0 rtnl_mutex &im->lock irq_context: 0 rtnl_mutex fib_info_lock irq_context: 0 rtnl_mutex rcu_read_lock &dir->lock#2 irq_context: 0 rtnl_mutex cbs_list_lock irq_context: 0 rtnl_mutex &ndev->lock irq_context: 0 rtnl_mutex &idev->mc_lock irq_context: 0 rtnl_mutex (inet6addr_validator_chain).rwsem irq_context: 0 rtnl_mutex rcu_read_lock &net->ipv6.addrconf_hash_lock irq_context: 0 rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex 
rcu_read_lock &net->sctp.local_addr_lock irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock pool_lock#2 irq_context: 0 rtnl_mutex &ifa->lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 rtnl_mutex &tb->tb6_lock irq_context: 0 rtnl_mutex &tb->tb6_lock &____s->seqcount irq_context: 0 rtnl_mutex &tb->tb6_lock pool_lock#2 irq_context: 0 rtnl_mutex &tb->tb6_lock &c->lock irq_context: 0 rtnl_mutex &tb->tb6_lock nl_table_lock irq_context: 0 rtnl_mutex &tb->tb6_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &tb->tb6_lock nl_table_wait.lock irq_context: 0 rtnl_mutex &ndev->lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_INET irq_context: softirq rcu_callback &dir->lock#2 irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &ei->i_es_lock key#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_INET6 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fs_reclaim irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_es_lock key#6 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_data_sem irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &xa->xa_lock#7 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 lock#4 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &dd->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &obj_hash[i].lock irq_context: 0 
sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &pool->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 bit_wait_table + i irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 inode_hash_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 inode_hash_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 inode_hash_lock &s->s_inode_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_lock_key#22 &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &dentry->d_lock &lru->node[i].lock irq_context: 0 sb_writers#5 tomoyo_ss irq_context: 0 sb_writers#5 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers#5 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#5 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#5 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#5 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#5 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#5 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#5 tomoyo_ss rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 &xattrs->lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock pgd_lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock key irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock pcpu_lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock percpu_counters_lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &mapping->i_mmap_rwsem irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &sb->s_type->i_lock_key &xa->xa_lock#7 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 lock#5 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &lruvec->lru_lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock irq_context: 0 &f->f_pos_lock sb_writers#5 irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 &xa->xa_lock#7 
irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 lock#4 irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 &info->lock irq_context: 0 &sb->s_type->i_lock_key#4 irq_context: 0 &sb->s_type->i_lock_key#4 &dentry->d_lock irq_context: 0 sk_lock-AF_INET irq_context: 0 sk_lock-AF_INET slock-AF_INET irq_context: 0 slock-AF_INET irq_context: 0 sk_lock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 slock-AF_INET6 irq_context: 0 slock-AF_INET6 irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss tomoyo_policy_lock &c->lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss tomoyo_policy_lock &n->list_lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss tomoyo_policy_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET &table->hash[i].lock irq_context: 0 sk_lock-AF_INET &table->hash[i].lock clock-AF_INET irq_context: 0 sk_lock-AF_INET &table->hash[i].lock &table->hash2[i].lock irq_context: 0 sk_lock-AF_INET6 &table->hash[i].lock irq_context: 0 sk_lock-AF_INET6 &table->hash[i].lock clock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 &table->hash[i].lock &table->hash2[i].lock irq_context: 0 sk_lock-AF_NETLINK &mm->mmap_lock irq_context: 0 sk_lock-AF_NETLINK fs_reclaim irq_context: 0 sk_lock-AF_NETLINK fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_NETLINK &c->lock irq_context: 0 sk_lock-AF_NETLINK &n->list_lock irq_context: 0 sk_lock-AF_NETLINK &n->list_lock &c->lock irq_context: 0 sk_lock-AF_NETLINK pool_lock#2 irq_context: 0 sk_lock-AF_NETLINK free_vmap_area_lock irq_context: 0 sk_lock-AF_NETLINK free_vmap_area_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_NETLINK free_vmap_area_lock pool_lock#2 irq_context: 0 sk_lock-AF_NETLINK vmap_area_lock irq_context: 0 sk_lock-AF_NETLINK &____s->seqcount irq_context: 0 sk_lock-AF_NETLINK pcpu_alloc_mutex irq_context: 0 sk_lock-AF_NETLINK pcpu_alloc_mutex pcpu_lock irq_context: 0 sk_lock-AF_NETLINK &obj_hash[i].lock irq_context: 0 sk_lock-AF_NETLINK pack_mutex irq_context: 0 sk_lock-AF_NETLINK batched_entropy_u32.lock irq_context: 0 sk_lock-AF_NETLINK text_mutex irq_context: 0 sk_lock-AF_NETLINK text_mutex ptlock_ptr(page)#2 irq_context: 0 sk_lock-AF_NETLINK &fp->aux->used_maps_mutex irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &mm->page_table_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &mm->page_table_lock pool_lock#2 irq_context: 0 rcu_read_lock &sb->s_type->i_lock_key#22 irq_context: 0 kn->active#46 fs_reclaim irq_context: 0 kn->active#46 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#46 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#46 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#46 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock cpufreq_driver_lock irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock rcu_node_0 irq_context: 0 
sk_lock-AF_NETLINK rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex &n->list_lock irq_context: 0 rtnl_mutex &n->list_lock &c->lock irq_context: 0 rtnl_mutex rcu_read_lock &ndev->lock &ifa->lock irq_context: 0 &type->i_mutex_dir_key#4 &dentry->d_lock &wq#2 irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex irq_context: 0 cb_lock &c->lock irq_context: 0 cb_lock &n->list_lock irq_context: 0 cb_lock &n->list_lock &c->lock irq_context: 0 cb_lock rtnl_mutex irq_context: 0 cb_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex &c->lock irq_context: 0 cb_lock genl_mutex &____s->seqcount irq_context: 0 sb_writers#3 &c->lock irq_context: 0 cb_lock genl_mutex &n->list_lock irq_context: 0 cb_lock genl_mutex &n->list_lock &c->lock irq_context: 0 dev_addr_sem irq_context: 0 &sb->s_type->i_mutex_key#9 &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#9 &n->list_lock &c->lock irq_context: 0 sb_writers#3 tomoyo_ss &n->list_lock irq_context: 0 sb_writers#3 tomoyo_ss &n->list_lock &c->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx irq_context: 0 cb_lock &rdev->wiphy.mtx irq_context: 0 cb_lock &rdev->wiphy.mtx fs_reclaim irq_context: 0 cb_lock &rdev->wiphy.mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &rdev->wiphy.mtx pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &n->list_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &n->list_lock &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx rlock-AF_NETLINK irq_context: 0 cb_lock nlk_cb_mutex-GENERIC irq_context: 0 cb_lock nlk_cb_mutex-GENERIC fs_reclaim irq_context: 0 cb_lock nlk_cb_mutex-GENERIC fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock nlk_cb_mutex-GENERIC pool_lock#2 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &____s->seqcount irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex &obj_hash[i].lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex pool_lock#2 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex &rdev->wiphy.mtx irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &rdev->wiphy.mtx irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &rdev->wiphy.mtx &wdev->mtx irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rlock-AF_NETLINK irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &____s->seqcount irq_context: 0 cb_lock &____s->seqcount irq_context: 0 cb_lock quarantine_lock irq_context: 0 cb_lock remove_cache_srcu irq_context: 0 cb_lock remove_cache_srcu quarantine_lock irq_context: 0 cb_lock remove_cache_srcu &c->lock irq_context: 0 cb_lock remove_cache_srcu &n->list_lock irq_context: 0 cb_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 cb_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 &pipe->rd_wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &mm->page_table_lock &meta->lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &mm->page_table_lock kfence_freelist_lock irq_context: softirq (&net->sctp.addr_wq_timer) irq_context: softirq (&net->sctp.addr_wq_timer) &net->sctp.addr_wq_lock irq_context: softirq (&net->sctp.addr_wq_timer) &net->sctp.addr_wq_lock &obj_hash[i].lock irq_context: softirq (&net->sctp.addr_wq_timer) 
&net->sctp.addr_wq_lock pool_lock#2 irq_context: 0 sb_writers#5 fs_reclaim irq_context: 0 sb_writers#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 sb_writers#5 mount_lock irq_context: 0 sb_writers#5 sb_writers#5 tk_core.seq.seqcount irq_context: 0 sb_writers#5 sb_writers#5 &sb->s_type->i_lock_key irq_context: 0 sb_writers#5 sb_writers#5 &wb->list_lock irq_context: 0 sb_writers#5 sb_writers#5 &wb->list_lock &sb->s_type->i_lock_key irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 remove_cache_srcu irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 remove_cache_srcu quarantine_lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 remove_cache_srcu &c->lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 remove_cache_srcu &n->list_lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#5 &sb->s_type->i_lock_key &xa->xa_lock#7 irq_context: 0 sb_writers#5 lock#4 irq_context: 0 sb_writers#5 lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#5 lock#5 irq_context: 0 sb_writers#5 &lruvec->lru_lock irq_context: 0 sb_writers#5 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss &n->list_lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rq->__lock irq_context: softirq mm/memcontrol.c:589 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key irq_context: 0 rtnl_mutex &dev_addr_list_lock_key pool_lock#2 irq_context: 0 rtnl_mutex uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &pnettable->lock irq_context: 0 rtnl_mutex smc_ib_devices.mutex irq_context: 0 &dentry->d_lock &dentry->d_lock &lru->node[i].lock irq_context: 0 rtnl_mutex napi_hash_lock irq_context: 0 rtnl_mutex lapb_list_lock irq_context: 0 rtnl_mutex remove_cache_srcu irq_context: 0 rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 rtnl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 rtnl_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex x25_neigh_list_lock irq_context: 0 rtnl_mutex console_lock 
console_srcu console_owner_lock irq_context: 0 rtnl_mutex console_lock console_srcu console_owner irq_context: 0 rtnl_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &u->lock &ei->socket.wq.wait irq_context: 0 rtnl_mutex _xmit_ETHER irq_context: 0 rtnl_mutex &tb->tb6_lock rlock-AF_NETLINK irq_context: 0 rtnl_mutex _xmit_SLIP irq_context: softirq (&eql->timer) irq_context: softirq (&eql->timer) &eql->queue.lock irq_context: softirq (&eql->timer) &obj_hash[i].lock irq_context: softirq (&eql->timer) &base->lock irq_context: softirq (&eql->timer) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 &root->kernfs_rwsem &sem->wait_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#4 &sem->wait_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#4 &p->pi_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &sem->wait_lock irq_context: 0 rtnl_mutex &sem->wait_lock irq_context: 0 &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &vi->refill_lock irq_context: softirq _xmit_ETHER#2 irq_context: 0 rtnl_mutex noop_qdisc.q.lock irq_context: 0 rtnl_mutex &rfkill->lock irq_context: 0 rtnl_mutex &local->chanctx_mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx lweventlist_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx lweventlist_lock pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx lweventlist_lock &dir->lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_node_0 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &data->mutex irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &data->mutex &rq->__lock irq_context: 0 rtnl_mutex _xmit_ETHER &local->filter_lock irq_context: 0 rtnl_mutex _xmit_ETHER &local->filter_lock pool_lock#2 irq_context: 0 rtnl_mutex _xmit_ETHER rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex _xmit_ETHER rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy0 irq_context: 0 (wq_completion)phy0 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy0 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 rtnl_mutex uevent_sock_mutex &n->list_lock irq_context: 0 rtnl_mutex uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &dev->tx_global_lock irq_context: 0 (wq_completion)events 
(linkwatch_work).work rtnl_mutex &dev->tx_global_lock _xmit_ETHER#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &dev->tx_global_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock noop_qdisc.q.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &sch->q.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex net_rwsem irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex class irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex (&tbl->proxy_timer) irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &base->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex cbs_list_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tn->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &tb->tb6_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex nl_table_wait.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &base->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)phy1 irq_context: 0 (wq_completion)phy1 
(work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy1 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 rtnl_mutex _xmit_ETHER &c->lock irq_context: 0 rtnl_mutex remove_cache_srcu &rq->__lock irq_context: 0 rtnl_mutex _xmit_VOID irq_context: 0 &u->iolock &u->lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 &u->iolock &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &u->iolock &mm->mmap_lock fs_reclaim irq_context: 0 &u->iolock &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &u->iolock &mm->mmap_lock &____s->seqcount irq_context: 0 &u->iolock &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 &u->iolock &mm->mmap_lock ptlock_ptr(page)#2 lock#4 irq_context: 0 &u->iolock rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &iint->mutex remove_cache_srcu irq_context: 0 &sig->cred_guard_mutex &iint->mutex remove_cache_srcu quarantine_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex remove_cache_srcu &c->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex remove_cache_srcu &n->list_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &iint->mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &p->alloc_lock &x->wait#25 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#3 &xa->xa_lock#7 &n->list_lock irq_context: 0 &type->i_mutex_dir_key#3 &xa->xa_lock#7 &n->list_lock &c->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &xa->xa_lock#7 &c->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex _xmit_X25 irq_context: 0 rtnl_mutex lapb_list_lock irq_context: 0 rtnl_mutex lapb_list_lock pool_lock#2 irq_context: 0 rtnl_mutex lapb_list_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex lapb_list_lock &base->lock irq_context: 0 rtnl_mutex lapb_list_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &lapbeth->up_lock irq_context: 0 rtnl_mutex &lapb->lock irq_context: 0 rtnl_mutex &lapb->lock pool_lock#2 irq_context: 0 rtnl_mutex &lapb->lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 rtnl_mutex &lapb->lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 rtnl_mutex &lapb->lock rcu_read_lock_bh pool_lock#2 irq_context: 0 rtnl_mutex &lapb->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &lapb->lock &base->lock irq_context: 0 rtnl_mutex &lapb->lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &ei->xattr_sem &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->xattr_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#7 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 lock#4 irq_context: 0 sb_writers#4 
&sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &memcg->move_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &xa->xa_lock#7 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &wb->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) devices_rwsem irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock_bh &base->lock irq_context: 0 rtnl_mutex rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) 
rtnl_mutex &pool->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ifa->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock nl_table_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rlock-AF_NETLINK irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock nl_table_wait.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock batched_entropy_u32.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &tb->tb6_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 sk_lock-AF_INET6 &____s->seqcount#8 irq_context: 0 sk_lock-AF_INET6 batched_entropy_u32.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &dir->lock#2 irq_context: 0 
sk_lock-AF_INET6 rcu_read_lock &ul->lock irq_context: 0 sk_lock-AF_INET6 &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 pool_lock#2 irq_context: 0 sk_lock-AF_INET6 batched_entropy_u16.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &table->hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &table->hash[i].lock &table->hash2[i].lock irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &rq->__lock irq_context: softirq rcu_callback rcu_read_lock rt6_exception_lock irq_context: softirq rcu_callback &ul->lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &p->alloc_lock irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock clock-AF_INET irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock clock-AF_INET irq_context: 0 sk_lock-AF_INET &h->lhash2[i].lock irq_context: 0 &u->lock/1 irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock clock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock clock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 &h->lhash2[i].lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &rq->__lock irq_context: 0 &mm->mmap_lock rcu_read_lock pgd_lock irq_context: 0 &mm->mmap_lock rcu_read_lock key irq_context: 0 &mm->mmap_lock rcu_read_lock pcpu_lock irq_context: 0 &mm->mmap_lock rcu_read_lock percpu_counters_lock irq_context: 0 &tty->legacy_mutex &tty->ldisc_sem irq_context: 0 &tty->legacy_mutex tasklist_lock irq_context: 0 &tty->legacy_mutex tasklist_lock &sighand->siglock irq_context: 0 &tty->legacy_mutex tasklist_lock &sighand->siglock &tty->ctrl.lock irq_context: 0 &tty->ldisc_sem rcu_read_lock &tty->ctrl.lock irq_context: 0 &tty->ctrl.lock irq_context: 0 tasklist_lock rcu_read_lock &sighand->siglock irq_context: 0 tasklist_lock &sighand->siglock irq_context: 0 &tty->legacy_mutex &tty->ctrl.lock irq_context: 0 &tty->legacy_mutex &f->f_lock irq_context: 0 &tty->legacy_mutex &f->f_lock fasync_lock irq_context: 0 &tty->legacy_mutex &obj_hash[i].lock irq_context: 0 &tty->legacy_mutex pool_lock#2 irq_context: 0 rcu_read_lock &tty->ctrl.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rename_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 fs_reclaim irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &dentry->d_lock &wq#2 irq_context: 0 &port_lock_key irq_context: 0 &buf->lock irq_context: 0 &tty->ldisc_sem &port_lock_key irq_context: 0 &tty->ldisc_sem &port->lock irq_context: 0 &tty->ldisc_sem &tty->termios_rwsem &tty->ldisc_sem &tty->flow.lock irq_context: 0 rtnl_mutex &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 
rtnl_mutex &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 rtnl_mutex &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: softirq &(&cache_cleaner)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &tty->ldisc_sem &ldata->atomic_read_lock irq_context: 0 &tty->ldisc_sem &ldata->atomic_read_lock &tty->termios_rwsem irq_context: 0 &tty->ldisc_sem &ldata->atomic_read_lock &tty->termios_rwsem &tty->read_wait irq_context: 0 &tty->ldisc_sem &ldata->atomic_read_lock (work_completion)(&buf->work) irq_context: 0 &tty->ldisc_sem &ldata->atomic_read_lock &rq->__lock irq_context: 0 &tty->ldisc_sem &ldata->atomic_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&idev->mc_dad_work)->timer irq_context: softirq &(&idev->mc_dad_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&idev->mc_dad_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&idev->mc_dad_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&idev->mc_dad_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&idev->mc_dad_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock irq_context: 0 rtnl_mutex &lapb->lock &c->lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->packet.sklist_lock irq_context: 0 sk_lock-AF_PACKET irq_context: 0 sk_lock-AF_PACKET slock-AF_PACKET irq_context: 0 sk_lock-AF_PACKET &po->bind_lock irq_context: 0 sk_lock-AF_PACKET &po->bind_lock ptype_lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_node_0 irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex &rq->__lock irq_context: 0 sk_lock-AF_PACKET &obj_hash[i].lock irq_context: 0 sk_lock-AF_PACKET &po->bind_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_PACKET &po->bind_lock rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_PACKET &po->bind_lock rcu_read_lock ptype_lock irq_context: 0 slock-AF_PACKET irq_context: 0 sk_lock-AF_PACKET &mm->mmap_lock irq_context: 0 sk_lock-AF_PACKET fs_reclaim irq_context: 0 sk_lock-AF_PACKET fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_PACKET pool_lock#2 irq_context: 0 sk_lock-AF_PACKET free_vmap_area_lock irq_context: 0 sk_lock-AF_PACKET free_vmap_area_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_PACKET 
vmap_area_lock irq_context: 0 sk_lock-AF_PACKET &____s->seqcount irq_context: 0 sk_lock-AF_PACKET pcpu_alloc_mutex irq_context: 0 sk_lock-AF_PACKET pcpu_alloc_mutex pcpu_lock irq_context: 0 sk_lock-AF_PACKET pack_mutex irq_context: 0 sk_lock-AF_PACKET batched_entropy_u32.lock irq_context: 0 sk_lock-AF_PACKET text_mutex irq_context: 0 sk_lock-AF_PACKET text_mutex ptlock_ptr(page)#2 irq_context: 0 sk_lock-AF_PACKET &fp->aux->used_maps_mutex irq_context: 0 rlock-AF_PACKET irq_context: 0 wlock-AF_PACKET irq_context: 0 sb_writers#5 &obj_hash[i].lock pool_lock irq_context: softirq _xmit_ETHER#2 &obj_hash[i].lock irq_context: softirq _xmit_ETHER#2 pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &pl->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &pl->lock key#12 irq_context: 0 &ret->b_state_lock &journal->j_list_lock key#14 irq_context: softirq (&rxnet->peer_keepalive_timer) rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: softirq net/core/link_watch.c:31 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 &mm->mmap_lock remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex rcu_read_lock &c->lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock batched_entropy_u32.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock crngs.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock &base->lock &obj_hash[i].lock irq_context: 0 &u->iolock rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &c->lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &n->list_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &n->list_lock &c->lock irq_context: 0 rtnl_mutex class irq_context: 0 rtnl_mutex (&tbl->proxy_timer) irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER &local->filter_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER 
&local->filter_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ifa->lock &obj_hash[i].lock irq_context: softirq &(&idev->mc_ifc_work)->timer irq_context: softirq &(&idev->mc_ifc_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&idev->mc_ifc_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&idev->mc_ifc_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&idev->mc_ifc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&idev->mc_ifc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) 
&idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock batched_entropy_u32.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: softirq rcu_callback &ul->lock#2 irq_context: 0 rtnl_mutex &net->ipv6.addrconf_hash_lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER &local->filter_lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER &local->filter_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER &local->filter_lock pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER &local->filter_lock krc.lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &base->lock irq_context: 0 rtnl_mutex &idev->mc_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &dir->lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex rcu_read_lock krc.lock irq_context: 0 rtnl_mutex &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 rtnl_mutex &tb->tb6_lock rt6_exception_lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 &dir->lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 &obj_hash[i].lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 sk_lock-AF_PACKET &c->lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: softirq _xmit_ETHER#2 rcu_read_lock &ei->socket.wq.wait irq_context: softirq rcu_read_lock rlock-AF_PACKET irq_context: softirq rcu_read_lock rcu_read_lock &ei->socket.wq.wait irq_context: softirq rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock once_lock irq_context: softirq rcu_read_lock rcu_read_lock once_lock crngs.lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq &(&ifa->dad_work)->timer irq_context: softirq &(&ifa->dad_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&ifa->dad_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&ifa->dad_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&ifa->dad_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&ifa->dad_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock &base->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &dir->lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ul->lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock kfence_freelist_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &meta->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh kfence_freelist_lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 
sk_lock-AF_INET6 fs_reclaim irq_context: 0 sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock irq_context: 0 sk_lock-AF_INET6 once_lock irq_context: 0 sk_lock-AF_INET6 once_lock crngs.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_node_0 irq_context: 0 sk_lock-AF_INET6 &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &tbl->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &n->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &____s->seqcount#9 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &net->packet.sklist_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &po->bind_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &po->bind_lock ptype_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &po->bind_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &po->bind_lock &dir->lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET slock-AF_PACKET irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_PACKET irq_context: 0 &sb->s_type->i_mutex_key#10 fanout_mutex irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 
rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#9 &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#9 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#9 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_PACKET irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_PACKET irq_context: 0 &sb->s_type->i_mutex_key#10 pcpu_lock irq_context: 0 &sb->s_type->i_mutex_key#10 elock-AF_PACKET irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock krc.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock krc.lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &sighand->siglock irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock batched_entropy_u8.lock irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock kfence_freelist_lock irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock &sighand->signalfd_wqh irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq &p->pi_lock irq_context: 0 
rcu_read_lock rcu_read_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock pool_lock#2 irq_context: 0 &ep->mtx rcu_read_lock &sighand->signalfd_wqh irq_context: 0 &ep->mtx rcu_read_lock &ei->socket.wq.wait irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &sem->wait_lock irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq &p->pi_lock irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &sem->wait_lock irq_context: 0 &mm->mmap_lock &sem->wait_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &p->pi_lock irq_context: 0 &mm->mmap_lock &p->pi_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &obj_hash[i].lock pool_lock irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem pgd_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem key irq_context: 0 &mm->mmap_lock &anon_vma->rwsem pcpu_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem percpu_counters_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem pool_lock#2 irq_context: 0 &mm->mmap_lock &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 
0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: softirq _xmit_ETHER#2 quarantine_lock irq_context: softirq rcu_read_lock rcu_read_lock batched_entropy_u8.lock irq_context: softirq rcu_read_lock rcu_read_lock kfence_freelist_lock irq_context: softirq rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq rcu_read_lock once_lock irq_context: softirq rcu_read_lock once_lock crngs.lock irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&dev->watchdog_timer) irq_context: softirq (&dev->watchdog_timer) &dev->tx_global_lock irq_context: softirq (&dev->watchdog_timer) &dev->tx_global_lock &obj_hash[i].lock irq_context: softirq (&dev->watchdog_timer) &dev->tx_global_lock &base->lock irq_context: softirq (&dev->watchdog_timer) &dev->tx_global_lock &base->lock &obj_hash[i].lock irq_context: softirq (&lapb->t1timer) irq_context: softirq (&lapb->t1timer) &lapb->lock irq_context: softirq (&lapb->t1timer) &lapb->lock batched_entropy_u8.lock irq_context: softirq (&lapb->t1timer) &lapb->lock kfence_freelist_lock irq_context: softirq (&lapb->t1timer) &lapb->lock pool_lock#2 irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh pool_lock#2 irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh &meta->lock irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh kfence_freelist_lock irq_context: softirq (&lapb->t1timer) &lapb->lock &obj_hash[i].lock irq_context: softirq (&lapb->t1timer) &lapb->lock &base->lock irq_context: softirq (&lapb->t1timer) &lapb->lock &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &____s->seqcount#10 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq (&lapb->t1timer) &lapb->lock &c->lock irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: softirq (&lapb->t1timer) &lapb->lock 
rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &meta->lock irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 kfence_freelist_lock irq_context: 0 rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rlock-AF_NETLINK irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &dir->lock#2 irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock rcu_read_lock &base->lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &dentry->d_lock irq_context: 0 sb_writers#5 tomoyo_ss &c->lock irq_context: 0 hostname_poll.wait.lock irq_context: 0 &vma->vm_lock->lock &lruvec->lru_lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock pool_lock#2 irq_context: 
softirq &(&dm_bufio_cleanup_old_work)->timer irq_context: softirq &(&dm_bufio_cleanup_old_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&dm_bufio_cleanup_old_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&dm_bufio_cleanup_old_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&dm_bufio_cleanup_old_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&dm_bufio_cleanup_old_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: hardirq &x->wait#12 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)dm_bufio_cache irq_context: 0 (wq_completion)dm_bufio_cache (work_completion)(&(&dm_bufio_cleanup_old_work)->work) irq_context: 0 (wq_completion)dm_bufio_cache (work_completion)(&(&dm_bufio_cleanup_old_work)->work) dm_bufio_clients_lock irq_context: 0 (wq_completion)dm_bufio_cache (work_completion)(&(&dm_bufio_cleanup_old_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)dm_bufio_cache (work_completion)(&(&dm_bufio_cleanup_old_work)->work) &base->lock irq_context: 0 (wq_completion)dm_bufio_cache (work_completion)(&(&dm_bufio_cleanup_old_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq &(&tbl->gc_work)->timer irq_context: softirq &(&tbl->gc_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&tbl->gc_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&tbl->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&tbl->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&tbl->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &base->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: softirq rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: softirq rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: softirq rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock once_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock once_lock crngs.lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq 
rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock batched_entropy_u32.lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &base->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &hashinfo->ehash_locks[i] irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &base->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &n->lock irq_context: softirq rcu_read_lock &n->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &n->lock &base->lock irq_context: softirq rcu_read_lock &n->lock &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &n->lock &(&n->ha_lock)->lock irq_context: softirq rcu_read_lock &n->lock &(&n->ha_lock)->lock &____s->seqcount#9 irq_context: softirq rcu_read_lock rcu_read_lock &n->lock irq_context: softirq rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock 
pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock lock#8 irq_context: softirq rcu_read_lock rcu_read_lock id_table_lock irq_context: softirq rcu_read_lock &n->lock irq_context: softirq rcu_read_lock &n->lock &____s->seqcount#9 irq_context: softirq rcu_read_lock nl_table_lock irq_context: softirq rcu_read_lock rlock-AF_NETLINK irq_context: softirq rcu_read_lock rcu_read_lock &dir->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET batched_entropy_u16.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET &tcp_hashinfo.bhash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET &hashinfo->ehash_locks[i] irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET (&req->rsk_timer) irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET &base->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET &queue->rskq_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET rcu_read_lock tcp_metrics_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET rcu_read_lock tcp_metrics_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET rcu_read_lock tcp_metrics_lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET rcu_read_lock tcp_metrics_lock &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET rcu_read_lock tcp_metrics_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET rcu_read_lock &ei->socket.wq.wait irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &c->lock irq_context: softirq rcu_read_lock rcu_read_lock 
slock-AF_INET/1 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET &queue->rskq_lock irq_context: 0 sk_lock-AF_INET clock-AF_INET irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss &n->list_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET &base->lock irq_context: 0 sk_lock-AF_INET &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET fs_reclaim irq_context: 0 sk_lock-AF_INET fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET &____s->seqcount irq_context: 0 sk_lock-AF_INET pool_lock#2 irq_context: 0 sk_lock-AF_INET &c->lock irq_context: 0 sk_lock-AF_INET &mm->mmap_lock irq_context: 0 sk_lock-AF_INET tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock 
rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &base->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &base->lock &obj_hash[i].lock irq_context: 0 &u->iolock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 &u->iolock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&icsk->icsk_delack_timer) irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET tk_core.seq.seqcount irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET pool_lock#2 irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 &u->iolock rcu_read_lock rcu_node_0 irq_context: 0 &u->iolock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET &sd->defer_lock irq_context: softirq &sd->defer_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 sk_lock-AF_INET &mm->mmap_lock fs_reclaim irq_context: 0 sk_lock-AF_INET &mm->mmap_lock fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET &mm->mmap_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET &mm->mmap_lock ptlock_ptr(page)#2 lock#4 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &u->iolock &dir->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET clock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &hashinfo->ehash_locks[i] irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET elock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 
softirq rcu_callback uidhash_lock irq_context: softirq rcu_callback ucounts_lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET batched_entropy_u8.lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET kfence_freelist_lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET &meta->lock irq_context: 0 sk_lock-AF_INET kfence_freelist_lock irq_context: softirq (&icsk->icsk_retransmit_timer) irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET irq_context: 0 &pipe->wr_wait irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 batched_entropy_u8.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 kfence_freelist_lock irq_context: 0 &pipe->mutex/1 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &mapping->private_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &meta->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#7 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 kfence_freelist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 lock#4 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 lock#4 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_es_lock key#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &xa->xa_lock#7 key#10 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &(ei->i_block_reservation_lock) key#15 irq_context: 0 &rq->__lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &vma->vm_lock->lock rcu_node_0 irq_context: 
0 sk_lock-AF_PACKET rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &vma->vm_lock->lock &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 &vma->vm_lock->lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &stopper->lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &stop_pi_lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &stop_pi_lock &rq->__lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &stopper->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &stop_pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &stop_pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock &anon_vma->rwsem ptlock_ptr(page) irq_context: 0 &vma->vm_lock->lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &meta->lock irq_context: 0 &vma->vm_lock->lock remove_cache_srcu irq_context: 0 &vma->vm_lock->lock remove_cache_srcu quarantine_lock irq_context: 0 &vma->vm_lock->lock remove_cache_srcu &c->lock irq_context: 0 &vma->vm_lock->lock remove_cache_srcu &n->list_lock irq_context: 0 &vma->vm_lock->lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &vma->vm_lock->lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 &pcp->lock &zone->lock irq_context: 0 &pipe->mutex/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &pipe->wr_wait &p->pi_lock irq_context: 0 &pipe->wr_wait &p->pi_lock &rq->__lock irq_context: 0 &pipe->wr_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &vma->vm_lock->lock &c->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq rcu_read_lock rcu_read_lock &ul->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &vma->vm_lock->lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &vma->vm_lock->lock fs_reclaim &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 
&____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rq->__lock irq_context: softirq slock-AF_INET tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET &n->list_lock irq_context: 0 sk_lock-AF_INET &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 quarantine_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 batched_entropy_u8.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 kfence_freelist_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 &journal->j_list_lock &obj_hash[i].lock irq_context: 0 &journal->j_list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 &journal->j_list_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle irq_context: 0 &journal->j_state_lock &journal->j_wait_transaction_locked &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_state_lock &journal->j_wait_transaction_locked &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xa->xa_lock#7 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle bit_wait_table + i irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET slock-AF_INET tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &meta->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 kfence_freelist_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &zone->lock irq_context: 0 sk_lock-AF_INET batched_entropy_u8.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &meta->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#7 &c->lock irq_context: 0 &vma->vm_lock->lock batched_entropy_u8.lock irq_context: 0 &vma->vm_lock->lock kfence_freelist_lock irq_context: 0 sk_lock-AF_INET &pcp->lock &zone->lock irq_context: 0 sk_lock-AF_INET &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#7 &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#7 &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 quarantine_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &pcp->lock &zone->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) rcu_read_lock &stopper->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) rcu_read_lock &stop_pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) rcu_read_lock &stop_pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) rcu_read_lock &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock &ul->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu irq_context: 0 sk_lock-AF_INET remove_cache_srcu quarantine_lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu &c->lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu &n->list_lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET remove_cache_srcu &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_node_0 irq_context: 0 &pipe->mutex/1 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 &pipe->mutex/1 &mm->mmap_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle 
rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &rq->__lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &meta->lock irq_context: 0 &pipe->wr_wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &pipe->mutex/1 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->wr_wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 quarantine_lock irq_context: 0 sb_writers#4 tomoyo_ss irq_context: 0 sb_writers#4 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#4 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#4 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 
mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &hashinfo->ehash_locks[i] irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &tcp_hashinfo.bhash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 elock-AF_INET irq_context: softirq rcu_read_lock rcu_read_lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &pcp->lock &zone->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &ul->lock irq_context: 0 sk_lock-AF_INET6 batched_entropy_u32.lock crngs.lock irq_context: softirq drivers/regulator/core.c:6266 irq_context: softirq drivers/regulator/core.c:6266 rcu_read_lock &pool->lock irq_context: softirq drivers/regulator/core.c:6266 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq drivers/regulator/core.c:6266 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq drivers/regulator/core.c:6266 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq drivers/regulator/core.c:6266 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (regulator_init_complete_work).work irq_context: 0 (wq_completion)events (regulator_init_complete_work).work &k->list_lock irq_context: 0 (wq_completion)events (regulator_init_complete_work).work &k->k_lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &pipe->mutex/1 &mm->mmap_lock fs_reclaim irq_context: 0 &pipe->mutex/1 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &pipe->mutex/1 &mm->mmap_lock &____s->seqcount irq_context: 0 &pipe->mutex/1 &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 &pipe->mutex/1 &mm->mmap_lock ptlock_ptr(page)#2 lock#4 irq_context: 0 &pipe->mutex/1 &mm->mmap_lock ptlock_ptr(page)#2 lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss tomoyo_policy_lock 
&c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock quarantine_lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 &rq->__lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock pgd_lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock key irq_context: 0 &vma->vm_lock->lock rcu_read_lock pcpu_lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock percpu_counters_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &ul->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 tomoyo_ss &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_list_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &c->lock irq_context: 0 rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &c->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_node_0 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &rcu_state.expedited_wq irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &rq->__lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&tbl->managed_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&journal->j_commit_timer) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 batched_entropy_u8.lock crngs.lock base_crng.lock irq_context: 0 lock#4 &lruvec->lru_lock irq_context: 0 lock#5 irq_context: 0 rcu_read_lock &base->lock irq_context: 0 rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock batched_entropy_u32.lock crngs.lock irq_context: softirq rcu_read_lock rcu_read_lock &meta->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET batched_entropy_u16.lock crngs.lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex batched_entropy_u8.lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex batched_entropy_u8.lock crngs.lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex kfence_freelist_lock irq_context: 0 kn->active#47 fs_reclaim irq_context: 0 kn->active#47 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#47 &c->lock irq_context: 0 kn->active#47 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#47 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#47 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock khugepaged_mm_lock irq_context: 0 &mm->mmap_lock khugepaged_wait.lock irq_context: 0 &mm->mmap_lock khugepaged_wait.lock &p->pi_lock irq_context: 0 &mm->mmap_lock khugepaged_wait.lock &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock khugepaged_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 lock#3 &obj_hash[i].lock irq_context: 0 lock#3 rcu_read_lock &pool->lock irq_context: 0 lock#3 
rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 lock#3 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 lock#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 lock#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 lock#3 &rq->__lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(work) irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(work) lock#4 irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(work) lock#4 &lruvec->lru_lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(work) lock#5 irq_context: 0 lock#3 (work_completion)(work) irq_context: 0 lock#3 rcu_read_lock (wq_completion)mm_percpu_wq irq_context: 0 lock#3 &x->wait#10 irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&barr->work) irq_context: 0 lock#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&barr->work) &x->wait#10 irq_context: 0 lock#3 rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&barr->work) &x->wait#10 &p->pi_lock irq_context: 0 lock#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &sighand->siglock irq_context: 0 rcu_read_lock &sighand->siglock pool_lock#2 irq_context: 0 rcu_read_lock &sighand->siglock &p->pi_lock irq_context: 0 &futex_queues[i].lock irq_context: 0 rcu_read_lock &sighand->siglock batched_entropy_u8.lock irq_context: 0 rcu_read_lock &sighand->siglock kfence_freelist_lock irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 rcu_read_lock pool_lock#2 irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 &obj_hash[i].lock irq_context: 0 rcu_read_lock &sighand->siglock &c->lock irq_context: 0 &ep->mtx &ep->lock &ep->wq irq_context: 0 &ep->mtx &ep->lock &ep->wq &p->pi_lock irq_context: 0 &ep->mtx &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &ep->mtx &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx &lock->wait_lock irq_context: 0 &ep->mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &p->lock irq_context: 0 &f->f_pos_lock &p->lock fs_reclaim irq_context: 0 &f->f_pos_lock &p->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock &p->lock &c->lock irq_context: 0 &f->f_pos_lock &p->lock cpufreq_driver_lock irq_context: 0 &f->f_pos_lock &p->lock &mm->mmap_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &dentry->d_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &root->kernfs_rwsem irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock &mm->mmap_lock irq_context: 0 &ep->mtx kn->active#4 fs_reclaim irq_context: 0 &ep->mtx kn->active#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &ep->mtx kn->active#4 pool_lock#2 irq_context: 0 &ep->mtx kn->active#4 &on->poll irq_context: 0 &ep->mtx &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock &p->lock &of->mutex irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#4 param_lock irq_context: 0 &ep->mtx rcu_read_lock &on->poll irq_context: 0 &ep->mtx &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock &p->lock &n->list_lock irq_context: 0 &f->f_pos_lock &p->lock &n->list_lock &c->lock irq_context: 0 kn->active#4 &c->lock irq_context: 0 
kn->active#4 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 &ep->mtx kn->active#4 &c->lock irq_context: 0 &ep->mtx kn->active#4 &rq->__lock irq_context: 0 &f->f_pos_lock &p->lock &of->mutex &rq->__lock irq_context: 0 &ep->mtx rcu_read_lock rcu_node_0 irq_context: 0 &ep->mtx rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock &rq->__lock irq_context: 0 kn->active#4 &n->list_lock irq_context: 0 kn->active#4 &n->list_lock &c->lock irq_context: 0 kn->active#4 &rq->__lock irq_context: 0 tomoyo_ss rcu_node_0 irq_context: 0 &f->f_pos_lock &p->lock &mm->mmap_lock &rq->__lock irq_context: 0 &f->f_pos_lock &p->lock remove_cache_srcu irq_context: 0 &f->f_pos_lock &p->lock remove_cache_srcu quarantine_lock irq_context: 0 &f->f_pos_lock &p->lock remove_cache_srcu &c->lock irq_context: 0 &f->f_pos_lock &p->lock remove_cache_srcu &n->list_lock irq_context: 0 &f->f_pos_lock &p->lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &p->lock &____s->seqcount irq_context: 0 kn->active#4 &kernfs_locks->open_file_mutex[count] &rq->__lock irq_context: 0 kn->active#4 &____s->seqcount irq_context: 0 &f->f_pos_lock &p->lock pool_lock#2 irq_context: 0 &f->f_pos_lock &p->lock module_mutex irq_context: 0 sk_lock-AF_INET rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock &ul->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET &____s->seqcount#8 irq_context: 0 sk_lock-AF_INET once_mutex irq_context: 0 sk_lock-AF_INET once_mutex crngs.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &____s->seqcount irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &c->lock irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &hashinfo->ehash_locks[i] irq_context: 0 sk_lock-AF_INET &rq->__lock irq_context: 0 sk_lock-AF_INET batched_entropy_u32.lock irq_context: 0 sk_lock-AF_INET batched_entropy_u16.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq 
(&rxnet->peer_keepalive_timer) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq (&rxnet->peer_keepalive_timer) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&wb->dwork)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&lapb->t1timer) &lapb->lock &n->list_lock irq_context: softirq (&lapb->t1timer) &lapb->lock &n->list_lock &c->lock irq_context: 0 &ep->mtx &pipe->wr_wait irq_context: 0 rcu_read_lock tasklist_lock irq_context: 0 &ep->mtx rcu_read_lock &pipe->wr_wait irq_context: 0 &sb->s_type->i_lock_key#7 irq_context: 0 sb_writers#9 irq_context: 0 sb_writers#9 &attr->mutex irq_context: 0 sb_writers#9 &attr->mutex &mm->mmap_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &c->lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &____s->seqcount irq_context: 0 sb_writers#3 &p->pi_lock irq_context: 0 sb_writers#3 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#3 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#3 &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &n->list_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &s->s_inode_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->xattr_sem irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->xattr_sem &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle 
&ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_revoke_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &meta_group_info[i]->alloc_sem irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle inode_hash_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle inode_hash_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle batched_entropy_u32.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &xa->xa_lock#7 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &xa->xa_lock#7 pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle lock#4 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &wb->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &xa->xa_lock#7 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem lock#4 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &mapping->private_lock 
irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &journal->j_revoke_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &sb->s_type->i_lock_key#22 &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_es_lock key#2 irq_context: 0 &type->s_umount_key#41/1 irq_context: 0 &type->s_umount_key#41/1 fs_reclaim irq_context: 0 &type->s_umount_key#41/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#41/1 pool_lock#2 irq_context: 0 &type->s_umount_key#41/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#41/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#41/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#41/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#41/1 sb_lock irq_context: 0 &type->s_umount_key#41/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_rwsem irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_rwsem inode_hash_lock irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_rwsem fs_reclaim irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_rwsem pool_lock#2 irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#30 irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_rwsem &sb->s_type->i_lock_key#30 irq_context: 0 &type->s_umount_key#41/1 &sb->s_type->i_lock_key#30 irq_context: 0 &type->s_umount_key#41/1 &sb->s_type->i_lock_key#30 &dentry->d_lock irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_supers_rwsem irq_context: 0 &type->s_umount_key#41/1 &dentry->d_lock irq_context: 0 sb_writers#10 irq_context: 0 sb_writers#10 mount_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 tomoyo_ss irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 tomoyo_ss &c->lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 tomoyo_ss 
&____s->seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 tk_core.seq.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_iattr_rwsem iattr_mutex irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_iattr_rwsem iattr_mutex fs_reclaim irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_iattr_rwsem iattr_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_iattr_rwsem iattr_mutex &____s->seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_iattr_rwsem iattr_mutex pool_lock#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_iattr_rwsem iattr_mutex &c->lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_iattr_rwsem iattr_mutex tk_core.seq.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 rename_lock.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 fs_reclaim irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &dentry->d_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem inode_hash_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem fs_reclaim irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#30 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &sb->s_type->i_lock_key#30 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &sb->s_type->i_lock_key#30 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &sb->s_type->i_lock_key#30 &dentry->d_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &sb->s_type->i_lock_key#30 &dentry->d_lock &wq#2 irq_context: 0 kn->active#48 fs_reclaim irq_context: 0 kn->active#48 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#48 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#48 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#48 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#48 &c->lock irq_context: 0 kn->active#48 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#15 irq_context: 0 sb_writers#10 fs_reclaim irq_context: 0 sb_writers#10 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &mm->mmap_lock irq_context: 0 sb_writers#10 &of->mutex irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex 
css_set_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock css_set_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#10 &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss tomoyo_policy_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss tomoyo_policy_lock &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss tomoyo_policy_lock &n->list_lock &c->lock irq_context: 0 cgroup_mutex fs_reclaim irq_context: 0 cgroup_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cgroup_mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 cgroup_mutex lock kernfs_idr_lock rcu_read_lock pool_lock#2 irq_context: 0 cgroup_mutex lock kernfs_idr_lock &obj_hash[i].lock irq_context: 0 cgroup_mutex cpu_hotplug_lock css_set_lock irq_context: 0 cgroup_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 cgroup_mutex css_set_lock cgroup_file_kn_lock irq_context: 0 &type->s_umount_key#42/1 irq_context: 0 &type->s_umount_key#42/1 fs_reclaim irq_context: 0 &type->s_umount_key#42/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#42/1 pool_lock#2 irq_context: 0 &type->s_umount_key#42/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#42/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#42/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#42/1 &c->lock irq_context: 0 &type->s_umount_key#42/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#42/1 sb_lock irq_context: 0 &type->s_umount_key#42/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem inode_hash_lock irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem fs_reclaim irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem pool_lock#2 irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#31 irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem &sb->s_type->i_lock_key#31 irq_context: 0 &type->s_umount_key#42/1 &sb->s_type->i_lock_key#31 irq_context: 0 &type->s_umount_key#42/1 &sb->s_type->i_lock_key#31 &dentry->d_lock irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_supers_rwsem irq_context: 0 &type->s_umount_key#42/1 &dentry->d_lock irq_context: 0 &type->s_umount_key#43 irq_context: 0 &type->s_umount_key#43 shrinker_rwsem irq_context: 0 &type->s_umount_key#43 percpu_ref_switch_lock irq_context: 0 &type->s_umount_key#43 percpu_ref_switch_lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#43 percpu_ref_switch_lock pool_lock#2 irq_context: 0 &type->s_umount_key#43 &root->kernfs_supers_rwsem irq_context: 0 &type->s_umount_key#43 rename_lock.seqcount irq_context: 0 &type->s_umount_key#43 &dentry->d_lock irq_context: 0 &type->s_umount_key#43 rcu_read_lock 
&dentry->d_lock irq_context: 0 &type->s_umount_key#43 &sb->s_type->i_lock_key#31 irq_context: 0 &type->s_umount_key#43 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#43 &xa->xa_lock#7 irq_context: 0 &type->s_umount_key#43 inode_hash_lock irq_context: 0 &type->s_umount_key#43 inode_hash_lock &sb->s_type->i_lock_key#31 irq_context: 0 &type->s_umount_key#43 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#43 pool_lock#2 irq_context: 0 &type->s_umount_key#43 &fsnotify_mark_srcu irq_context: 0 &type->s_umount_key#43 sb_lock irq_context: 0 &type->s_umount_key#43 &obj_hash[i].lock pool_lock irq_context: 0 cgroup_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem tk_core.seq.seqcount irq_context: 0 cgroup_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 cgroup_mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 cgroup_mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 cgroup_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 cgroup_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cgroup_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cgroup_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cgroup_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cgroup_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cgroup_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 cgroup_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 cgroup_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 cgroup_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 cgroup_mutex &n->list_lock &c->lock irq_context: softirq rcu_callback rcu_read_lock rcu_read_lock &pool->lock irq_context: softirq rcu_callback rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_callback rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq rcu_callback rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_callback rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_callback rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&cgrp->bpf.release_work) irq_context: 0 (wq_completion)events (work_completion)(&cgrp->bpf.release_work) cgroup_mutex irq_context: 0 (wq_completion)events (work_completion)(&cgrp->bpf.release_work) percpu_ref_switch_lock irq_context: 0 (wq_completion)events (work_completion)(&cgrp->bpf.release_work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&cgrp->bpf.release_work) pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&cgrp->bpf.release_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&cgrp->bpf.release_work) rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&cgrp->bpf.release_work) rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&cgrp->bpf.release_work) rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)cgroup_destroy irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&css->destroy_work) irq_context: 0 (wq_completion)cgroup_destroy 
(work_completion)(&css->destroy_work) cgroup_mutex irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&css->destroy_work) cgroup_mutex cgroup_rstat_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&css->destroy_work) cgroup_mutex cgroup_rstat_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&css->destroy_work) cgroup_mutex css_set_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&css->destroy_work) &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&css->destroy_work) pool_lock#2 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) percpu_ref_switch_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) pool_lock#2 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &cgrp->pidlist_mutex irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) (wq_completion)cgroup_pidlist_destroy irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &wq->mutex irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &wq->mutex &pool->lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &wq->mutex &x->wait#10 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) (work_completion)(&cgrp->release_agent_work) irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex css_set_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpu_hotplug_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpu_hotplug_lock css_set_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex css_set_lock &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex css_set_lock pool_lock#2 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_rstat_lock irq_context: 0 (wq_completion)cgroup_destroy 
(work_completion)(&(&css->destroy_rwork)->work) cgroup_rstat_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) pcpu_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &root->kernfs_rwsem irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) kernfs_idr_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) kernfs_idr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex &pool->lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex fs_reclaim irq_context: 0 (wq_completion)cgroup_destroy 
(work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex pool_lock#2 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem tk_core.seq.seqcount irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex &c->lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &obj_hash[i].lock pool_lock irq_context: 0 &type->s_umount_key#42/1 &n->list_lock irq_context: 0 &type->s_umount_key#42/1 &n->list_lock &c->lock irq_context: 0 sb_writers#11 irq_context: 0 sb_writers#11 mount_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tomoyo_ss irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tomoyo_ss &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tk_core.seq.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_iattr_rwsem iattr_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_iattr_rwsem iattr_mutex fs_reclaim irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_iattr_rwsem iattr_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_iattr_rwsem iattr_mutex tk_core.seq.seqcount irq_context: 0 cgroup_mutex cpuset_rwsem irq_context: 0 cgroup_mutex cpuset_rwsem cpuset_rwsem.rss.gp_wait.lock irq_context: 0 cgroup_mutex cpuset_rwsem rcu_state.exp_mutex rcu_node_0 irq_context: 0 cgroup_mutex cpuset_rwsem rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cgroup_mutex cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cgroup_mutex cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cgroup_mutex cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cgroup_mutex cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cgroup_mutex cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_mutex cpuset_rwsem rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 cgroup_mutex cpuset_rwsem rcu_state.exp_mutex &rq->__lock irq_context: 0 cgroup_mutex cpuset_rwsem &obj_hash[i].lock irq_context: 0 cgroup_mutex cpuset_rwsem callback_lock irq_context: 0 cgroup_mutex cpuset_rwsem.waiters.lock 
irq_context: 0 cgroup_mutex cpuset_rwsem.rss.gp_wait.lock irq_context: 0 cgroup_mutex cpuset_rwsem.rss.gp_wait.lock &obj_hash[i].lock irq_context: 0 cgroup_mutex &dom->lock irq_context: 0 cgroup_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 sb_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpuset_rwsem irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpuset_rwsem cpuset_rwsem.rss.gp_wait.lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpuset_rwsem rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpuset_rwsem rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpuset_rwsem rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpuset_rwsem rcu_state.exp_mutex &pool->lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpuset_rwsem rcu_state.exp_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpuset_rwsem rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpuset_rwsem rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpuset_rwsem rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpuset_rwsem rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpuset_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpuset_rwsem callback_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpuset_rwsem.waiters.lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpuset_rwsem.rss.gp_wait.lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpuset_rwsem.rss.gp_wait.lock &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) quarantine_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex &dom->lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 irq_context: 0 sb_writers#11 
&type->i_mutex_dir_key#7 rename_lock.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 fs_reclaim irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &dentry->d_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem inode_hash_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem fs_reclaim irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#31 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &sb->s_type->i_lock_key#31 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &sb->s_type->i_lock_key#31 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &sb->s_type->i_lock_key#31 &dentry->d_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &sb->s_type->i_lock_key#31 &dentry->d_lock &wq#2 irq_context: 0 kn->active#49 fs_reclaim irq_context: 0 kn->active#49 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#49 &c->lock irq_context: 0 kn->active#49 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#49 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#49 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#16 irq_context: 0 sb_writers#11 fs_reclaim irq_context: 0 sb_writers#11 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &mm->mmap_lock irq_context: 0 sb_writers#11 &of->mutex irq_context: 0 sb_writers#11 &obj_hash[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &c->lock irq_context: 0 kn->active#50 fs_reclaim irq_context: 0 kn->active#50 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#50 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#50 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#50 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &of->mutex kn->active#50 cpu_hotplug_lock irq_context: 0 sb_writers#11 &of->mutex kn->active#50 cpu_hotplug_lock cpuset_rwsem irq_context: 0 sb_writers#11 &of->mutex kn->active#50 cpu_hotplug_lock cpuset_rwsem cpuset_rwsem.rss.gp_wait.lock irq_context: 0 sb_writers#11 &of->mutex kn->active#50 cpu_hotplug_lock cpuset_rwsem.waiters.lock irq_context: 0 sb_writers#11 &of->mutex kn->active#50 cpu_hotplug_lock cpuset_rwsem.rss.gp_wait.lock irq_context: 0 sb_writers#9 &mm->mmap_lock irq_context: 0 &type->s_umount_key#44 irq_context: 0 &type->s_umount_key#44 sb_lock irq_context: 0 &type->s_umount_key#44 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#17 irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem rcu_read_lock 
mount_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem rename_lock irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem rename_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem mount_lock irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem mount_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem mount_lock rcu_read_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem mount_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem mount_lock pool_lock#2 irq_context: 0 &sb->s_type->i_lock_key#26 irq_context: 0 sb_writers#12 irq_context: 0 sb_writers#12 fs_reclaim irq_context: 0 sb_writers#12 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#12 pool_lock#2 irq_context: 0 sb_writers#12 &mm->mmap_lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 rename_lock.seqcount irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 fs_reclaim irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 pool_lock#2 irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 &dentry->d_lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 &dentry->d_lock &wq irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 &c->lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 &____s->seqcount irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 &sb->s_type->i_lock_key#26 irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 &s->s_inode_list_lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 tk_core.seq.seqcount irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 pin_fs_lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 sb_lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 &type->s_umount_key#44 irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 &type->s_umount_key#44 sb_lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 &type->s_umount_key#44 &dentry->d_lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 mnt_id_ida.xa_lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 pcpu_alloc_mutex irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 mount_lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 mount_lock mount_lock.seqcount irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 &obj_hash[i].lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 rcu_read_lock mount_lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 &sb->s_type->i_lock_key#26 &dentry->d_lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 entries_lock irq_context: 
0 cb_lock genl_mutex rtnl_mutex irq_context: 0 rtnl_mutex dev_addr_sem irq_context: 0 rtnl_mutex dev_addr_sem net_rwsem irq_context: 0 rtnl_mutex dev_addr_sem net_rwsem &list->lock#2 irq_context: 0 rtnl_mutex dev_addr_sem &tn->lock irq_context: 0 rtnl_mutex dev_addr_sem &sdata->sec_mtx irq_context: 0 rtnl_mutex dev_addr_sem &sdata->sec_mtx &sec->lock irq_context: 0 rtnl_mutex dev_addr_sem fs_reclaim irq_context: 0 rtnl_mutex dev_addr_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex dev_addr_sem pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem nl_table_lock irq_context: 0 rtnl_mutex dev_addr_sem rlock-AF_NETLINK irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &ei->socket.wq.wait irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 rtnl_mutex dev_addr_sem nl_table_wait.lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock irq_context: 0 rtnl_mutex dev_addr_sem &pn->hash_lock irq_context: softirq rcu_callback cpuset_rwsem.rss.gp_wait.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_addr_sem &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock irq_context: 0 rtnl_mutex dev_addr_sem &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 rtnl_mutex dev_addr_sem &net->ipv6.fib6_gc_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_addr_sem input_pool.lock irq_context: 0 rtnl_mutex _xmit_IEEE802154 irq_context: 0 rtnl_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 rtnl_mutex rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 rtnl_mutex proc_subdir_lock irq_context: 0 rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 rtnl_mutex proc_subdir_lock irq_context: 0 rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rename_lock.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 fs_reclaim irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &dentry->d_lock &wq#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 
sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem key#15 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &wb->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &sbi->s_md_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &xa->xa_lock#7 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem lock#4 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &journal->j_revoke_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem key#3 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 kn->active#15 remove_cache_srcu irq_context: 0 kn->active#15 remove_cache_srcu quarantine_lock irq_context: 0 kn->active#15 remove_cache_srcu &c->lock irq_context: 0 kn->active#15 remove_cache_srcu &n->list_lock irq_context: 0 kn->active#15 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &rq->__lock irq_context: 0 &wb->list_lock irq_context: 0 &sbi->s_writepages_rwsem irq_context: 0 &sbi->s_writepages_rwsem &xa->xa_lock#7 irq_context: 0 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &sbi->s_writepages_rwsem &____s->seqcount irq_context: 0 &sbi->s_writepages_rwsem pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem &c->lock irq_context: 0 &sbi->s_writepages_rwsem lock#4 irq_context: 0 &sbi->s_writepages_rwsem lock#4 &lruvec->lru_lock irq_context: 0 &sbi->s_writepages_rwsem lock#5 irq_context: 0 &sbi->s_writepages_rwsem &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle lock#4 irq_context: 0 
&sbi->s_writepages_rwsem jbd2_handle lock#5 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &____s->seqcount irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &c->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &memcg->move_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#7 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#7 &s->s_inode_wblist_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates irq_context: 0 &sbi->s_writepages_rwsem tk_core.seq.seqcount irq_context: 0 &sbi->s_writepages_rwsem &dd->lock irq_context: 0 &sbi->s_writepages_rwsem &base->lock irq_context: 0 &sbi->s_writepages_rwsem &base->lock &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &dd->lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &virtscsi_vq->vq_lock irq_context: softirq &ei->i_completed_io_lock irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &ei->i_completed_io_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &journal->j_state_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &journal->j_state_lock &journal->j_wait_reserved irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_es_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 
(wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_raw_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &journal->j_wait_updates irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) pool_lock#2 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &ext4__ioend_wq[i] irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &ret->b_uptodate_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &memcg->move_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#7 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#7 key#10 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#7 &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#7 &base->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#7 &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#7 &wb->work_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#7 &wb->work_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#7 &wb->work_lock &base->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#7 &wb->work_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#7 &s->s_inode_wblist_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &folio_wait_table[i] irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &folio_wait_table[i] &p->pi_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_wait_commit irq_context: 0 &ret->b_state_lock &journal->j_list_lock &obj_hash[i].lock irq_context: 0 &ret->b_state_lock &journal->j_list_lock pool_lock#2 irq_context: 0 &iint->mutex sb_writers#4 &c->lock irq_context: 0 &iint->mutex sb_writers#4 &journal->j_state_lock irq_context: 0 &iint->mutex sb_writers#4 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 &iint->mutex sb_writers#4 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 &iint->mutex sb_writers#4 &journal->j_state_lock &base->lock irq_context: 0 &iint->mutex sb_writers#4 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 &iint->mutex mapping.invalidate_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &iint->mutex mapping.invalidate_lock lock#4 rcu_read_lock &____s->seqcount irq_context: 0 &iint->mutex mapping.invalidate_lock lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &iint->mutex mapping.invalidate_lock lock#4 &obj_hash[i].lock irq_context: 0 &iint->mutex mapping.invalidate_lock &rq->__lock irq_context: 0 &iint->mutex pgd_lock irq_context: 0 &iint->mutex rcu_read_lock pool_lock#2 irq_context: 0 &iint->mutex key irq_context: 0 &iint->mutex pcpu_lock irq_context: 0 &iint->mutex percpu_counters_lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock rcu_node_0 irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 free_vmap_area_lock irq_context: 0 &sb->s_type->i_mutex_key#8 vmap_area_lock irq_context: 0 &sb->s_type->i_mutex_key#8 init_mm.page_table_lock irq_context: 0 &sb->s_type->i_mutex_key#8 free_vmap_area_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 free_vmap_area_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 swap_cgroup_mutex irq_context: 0 &sb->s_type->i_mutex_key#8 swap_cgroup_mutex fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#8 swap_cgroup_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 swap_cgroup_mutex &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 swap_cgroup_mutex pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &journal->j_state_lock irq_context: 0 &sb->s_type->i_mutex_key#8 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &base->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock &q->requeue_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock 
&pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &x->wait#26 irq_context: 0 &sb->s_type->i_mutex_key#8 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &fq->mq_flush_lock &x->wait#26 &p->pi_lock &rq->__lock irq_context: softirq &fq->mq_flush_lock &x->wait#26 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &fq->mq_flush_lock &x->wait#26 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 (&timer.timer) irq_context: 0 &sb->s_type->i_mutex_key#8 &ei->i_es_lock irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex swap_lock irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex swap_lock &p->lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex percpu_ref_switch_lock irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex swap_lock &p->lock#2 swap_avail_lock irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex (console_sem).lock irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex console_lock console_srcu console_owner_lock irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex console_lock console_srcu console_owner irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &sb->s_type->i_mutex_key#8 proc_poll_wait.lock irq_context: 0 swap_slots_cache_enable_mutex irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock cpuhp_state_mutex cpuhp_state-down irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock cpuhp_state_mutex cpuhp_state-up irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock cpuhp_state_mutex &p->pi_lock irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock cpuhp_state_mutex &x->wait#6 irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock cpuhp_state_mutex &rq->__lock irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock cpuhp_state_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock cpuhp_state-up swap_slots_cache_mutex irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock cpuhp_state_mutex &p->pi_lock &rq->__lock irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock cpuhp_state_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 swap_slots_cache_enable_mutex swap_lock irq_context: 0 &sighand->siglock rcu_read_lock &____s->seqcount#5 irq_context: 0 &sighand->siglock &prev->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 
sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait irq_context: 0 sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &sighand->siglock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET &mm->mmap_lock &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock kfence_freelist_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &meta->lock irq_context: 0 &rq->__lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &rq->__lock rcu_read_lock &base->lock irq_context: 0 &rq->__lock rcu_read_lock &base->lock &obj_hash[i].lock irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: softirq slock-AF_INET &obj_hash[i].lock irq_context: softirq slock-AF_INET &base->lock irq_context: softirq slock-AF_INET &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_node_0 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &pcp->lock &zone->lock irq_context: softirq _xmit_ETHER#2 &meta->lock irq_context: softirq _xmit_ETHER#2 kfence_freelist_lock irq_context: 0 sk_lock-AF_INET &rq->__lock &cfs_rq->removed.lock irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem pool_lock#2 irq_context: 0 
(wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem lock#4 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem lock#4 &lruvec->lru_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem lock#5 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle lock#4 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle lock#5 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ei->i_prealloc_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem 
&lg->lg_mutex &mapping->private_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock &journal->j_list_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &pa->pa_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &lg->lg_prealloc_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &memcg->move_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#7 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#7 &s->s_inode_wblist_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_raw_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem tk_core.seq.seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem &dd->lock 
irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem &base->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 lock#4 &lruvec->lru_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &____s->seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &rq_wait->wait irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &pool->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_node_0 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &zone->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &zone->lock &____s->seqcount irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: softirq &rq_wait->wait irq_context: softirq &rq_wait->wait &p->pi_lock irq_context: softirq &rq_wait->wait &p->pi_lock &rq->__lock irq_context: softirq &rq_wait->wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &rq_wait->wait &p->pi_lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock &xa->xa_lock#7 key#13 irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock 
_xmit_ETHER#2 rcu_read_lock &____s->seqcount irq_context: 0 rcu_read_lock &sighand->siglock &____s->seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &pcp->lock &zone->lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock batched_entropy_u8.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock kfence_freelist_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &meta->lock irq_context: 0 sk_lock-AF_INET &mm->mmap_lock &p->pi_lock irq_context: 0 sk_lock-AF_INET &mm->mmap_lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET &mm->mmap_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock batched_entropy_u8.lock irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock kfence_freelist_lock irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &meta->lock irq_context: 0 sk_lock-AF_INET quarantine_lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu &rq->__lock irq_context: softirq (&lapb->t1timer) &lapb->lock &____s->seqcount irq_context: 0 &mm->mmap_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem &journal->j_state_lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem &journal->j_state_lock &base->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &pa->pa_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle 
&ei->i_data_sem &ret->b_state_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock &rq->__lock irq_context: 0 &vma->vm_lock->lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&timer) irq_context: softirq (&timer) &obj_hash[i].lock irq_context: softirq (&timer) &base->lock irq_context: softirq (&timer) &base->lock &obj_hash[i].lock irq_context: softirq (&timer) rcu_read_lock pool_lock#2 irq_context: softirq (&timer) rcu_read_lock &c->lock irq_context: softirq (&timer) rcu_read_lock &____s->seqcount irq_context: softirq (&timer) &txlock irq_context: softirq (&timer) &txlock &list->lock#3 irq_context: softirq (&timer) &txwq irq_context: softirq (&timer) &txwq &p->pi_lock irq_context: 0 rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 rcu_read_lock_bh &list->lock#5 irq_context: softirq &list->lock#5 irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_SLIP#2 irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_SLIP#2 &eql->queue.lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_SLIP#2 &eql->queue.lock &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_SLIP#2 &eql->queue.lock pool_lock#2 irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM (console_sem).lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM console_lock console_srcu console_owner_lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM console_lock console_srcu console_owner irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM console_lock console_srcu console_owner &port_lock_key irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM console_lock console_srcu console_owner console_owner_lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM pool_lock#2 irq_context: 0 rcu_read_lock_bh _xmit_X25#2 irq_context: 0 rcu_read_lock_bh _xmit_X25#2 &lapbeth->up_lock irq_context: 0 rcu_read_lock_bh _xmit_X25#2 &lapbeth->up_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh _xmit_X25#2 &lapbeth->up_lock pool_lock#2 irq_context: 0 &ep->mtx &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &ep->mtx &mm->mmap_lock &p->pi_lock irq_context: 0 &ep->mtx &mm->mmap_lock &p->pi_lock &rq->__lock irq_context: 0 &ep->mtx &mm->mmap_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET &c->lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET &____s->seqcount irq_context: 0 &vma->vm_lock->lock &p->pi_lock irq_context: 0 &vma->vm_lock->lock &p->pi_lock &rq->__lock irq_context: 0 &vma->vm_lock->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&n->timer) irq_context: softirq (&n->timer) &n->lock irq_context: softirq (&n->timer) &n->lock &obj_hash[i].lock irq_context: softirq (&n->timer) &n->lock &base->lock 
irq_context: softirq (&n->timer) &n->lock &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem ptlock_ptr(page)#2 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem ptlock_ptr(page)#2 &lruvec->lru_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page) lock#4 irq_context: 0 &mm->mmap_lock ptlock_ptr(page) lock#4 &lruvec->lru_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page) lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock ptlock_ptr(page) lock#4 &obj_hash[i].lock irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem batched_entropy_u8.lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem kfence_freelist_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem &meta->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sbi->s_md_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &journal->j_revoke_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem key#3 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) key#15 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock key#10 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#7 key#10 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle tk_core.seq.seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 
&sbi->s_writepages_rwsem jbd2_handle &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &base->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &dd->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &dd->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &base->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &____s->seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem rcu_read_lock &dd->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 
(wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem &rq_wait->wait irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem rcu_node_0 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem rcu_read_lock &stopper->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem rcu_read_lock &stop_pi_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem rcu_read_lock &stop_pi_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem rcu_read_lock &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem rcu_read_lock &c->lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock &c->lock irq_context: 0 (wq_completion)writeback 
(work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle lock#4 &lruvec->lru_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_node_0 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock key#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &rq_wait->wait irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &pool->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#7 &pl->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#7 &pl->lock key#12 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#7 key#12 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#7 key#13 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &stopper->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &stop_pi_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle 
rcu_read_lock &stop_pi_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &rq->__lock irq_context: softirq (&cb->timer) &rq_wait->wait irq_context: softirq (&cb->timer) &rq_wait->wait &p->pi_lock irq_context: softirq (&cb->timer) &rq_wait->wait &p->pi_lock &rq->__lock irq_context: softirq (&cb->timer) &rq_wait->wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &pcp->lock &zone->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sk_lock-AF_INET batched_entropy_u8.lock crngs.lock irq_context: 0 sk_lock-AF_INET batched_entropy_u8.lock crngs.lock base_crng.lock irq_context: 0 rcu_read_lock &sighand->siglock batched_entropy_u8.lock crngs.lock irq_context: 0 rcu_read_lock &sighand->siglock batched_entropy_u8.lock crngs.lock base_crng.lock irq_context: softirq net/wireless/reg.c:236 irq_context: softirq net/wireless/reg.c:236 rcu_read_lock &pool->lock irq_context: softirq net/wireless/reg.c:236 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq net/wireless/reg.c:236 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq net/wireless/reg.c:236 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq net/wireless/reg.c:236 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem (&timer.timer) irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem fw_lock &x->wait#23 irq_context: 0 (wq_completion)events 
(work_completion)(&fw_work->work) umhelper_sem dev_pm_qos_sysfs_mtx irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem kernfs_idr_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &k->k_lock klist_remove_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem subsys mutex#79 &k->k_lock klist_remove_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem deferred_probe_mutex irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem device_links_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex nl_table_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex nl_table_wait.lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex sysfs_symlink_target_lock irq_context: 
0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) fw_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex reg_indoor_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex krc.lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex krc.lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex reg_requests_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex reg_pending_beacons_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work &rq->__lock &cfs_rq->removed.lock irq_context: 0 inode_hash_lock &sb->s_type->i_lock_key#24 irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work rtnl_mutex irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work rtnl_mutex &rdev->wiphy.mtx irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work rtnl_mutex &rdev->wiphy.mtx &wdev->mtx irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait 
&ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 rcu_read_lock &sighand->siglock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: softirq (&dom->period_timer) &p->sequence key#13 irq_context: 0 &ep->mtx &rq->__lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq net/wireless/reg.c:533 irq_context: softirq net/wireless/reg.c:533 rcu_read_lock &pool->lock irq_context: softirq net/wireless/reg.c:533 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq net/wireless/reg.c:533 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq net/wireless/reg.c:533 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq net/wireless/reg.c:533 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex reg_indoor_lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex krc.lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex reg_requests_lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex reg_pending_beacons_lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex &c->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock quarantine_lock irq_context: softirq &c->lock batched_entropy_u8.lock irq_context: softirq &c->lock kfence_freelist_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates 
&p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &rq_wait->wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &journal->j_state_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &journal->j_state_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &journal->j_state_lock &base->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex krc.lock &base->lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex &n->list_lock &c->lock irq_context: 0 &vma->vm_lock->lock remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &vma->vm_lock->lock remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &p->lock &mm->mmap_lock fs_reclaim irq_context: 0 &p->lock &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock &mm->mmap_lock &____s->seqcount irq_context: 0 &p->lock &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 &p->lock &mm->mmap_lock ptlock_ptr(page)#2 lock#4 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex remove_cache_srcu 
&obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss &rq->__lock irq_context: 0 sb_writers#3 tomoyo_ss &rq->__lock irq_context: 0 sb_writers#3 tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#3 tomoyo_ss rcu_read_lock &rq->__lock irq_context: softirq &(&ovs_net->masks_rebalance)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#9 &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#9 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &vma->vm_lock->lock remove_cache_srcu &rq->__lock irq_context: 0 &vma->vm_lock->lock remove_cache_srcu rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &vma->vm_lock->lock remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &n->list_lock &c->lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount irq_context: 0 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET &mm->mmap_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) rcu_read_lock &base->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) rcu_read_lock &base->lock &obj_hash[i].lock irq_context: softirq &(&wb->bw_dwork)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock &____s->seqcount irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &rq->__lock irq_context: 0 &vma->vm_lock->lock mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &mm->mmap_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&tbl->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &pool->lock/1 &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: 0 &pool->lock/1 &p->pi_lock &rq->__lock &base->lock irq_context: 0 &pool->lock/1 &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock quarantine_lock irq_context: 0 sk_lock-AF_INET fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: softirq (&sk->sk_timer) irq_context: softirq (&sk->sk_timer) slock-AF_INET irq_context: softirq (&sk->sk_timer) slock-AF_INET tk_core.seq.seqcount irq_context: softirq (&sk->sk_timer) slock-AF_INET &obj_hash[i].lock irq_context: softirq (&sk->sk_timer) slock-AF_INET &base->lock irq_context: softirq (&sk->sk_timer) slock-AF_INET &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &rq->__lock &obj_hash[i].lock irq_context: softirq 
rcu_read_lock &rq->__lock &base->lock irq_context: softirq rcu_read_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock &c->lock irq_context: 0 &vma->vm_lock->lock fs_reclaim &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_INET rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &vma->vm_lock->lock &n->list_lock irq_context: 0 &vma->vm_lock->lock &n->list_lock &c->lock irq_context: softirq (&lapb->t1timer) &lapb->lock &list->lock#6 irq_context: softirq (&lapb->t1timer) &lapb->lock &list->lock#7 irq_context: softirq &list->lock#7 irq_context: softirq rcu_read_lock x25_neigh_list_lock irq_context: softirq rcu_read_lock &list->lock#8 irq_context: softirq rcu_read_lock x25_list_lock irq_context: softirq rcu_read_lock x25_forward_list_lock irq_context: softirq rcu_read_lock batched_entropy_u8.lock irq_context: softirq rcu_read_lock kfence_freelist_lock irq_context: softirq rcu_read_lock &meta->lock irq_context: 0 &sb->s_type->i_lock_key &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &mapping->private_lock irq_context: 0 sb_writers#4 
&type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sb->s_type->i_lock_key#22 &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &obj_hash[i].lock irq_context: 0 sb_writers#5 tomoyo_ss &n->list_lock irq_context: 0 sb_writers#5 tomoyo_ss &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu quarantine_lock irq_context: 0 &mm->mmap_lock &xa->xa_lock#7 irq_context: 0 &mm->mmap_lock &info->lock irq_context: 0 &mm->mmap_lock tk_core.seq.seqcount irq_context: 0 &mm->mmap_lock &xa->xa_lock#7 pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle batched_entropy_u32.lock crngs.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &wb->work_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &wb->work_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &wb->work_lock &base->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &wb->work_lock &base->lock &obj_hash[i].lock irq_context: 0 &sbinfo->stat_lock irq_context: 0 &mm->mmap_lock mount_lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_lock_key irq_context: 0 &mm->mmap_lock &wb->list_lock irq_context: 0 &mm->mmap_lock &wb->list_lock &sb->s_type->i_lock_key irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sem->wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &rq->__lock irq_context: 0 &mm->mmap_lock &xa->xa_lock#7 &c->lock irq_context: 0 sb_writers#4 &sem->wait_lock irq_context: 0 sb_writers#4 &p->pi_lock irq_context: 0 sb_writers#4 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &rq->__lock irq_context: 0 &ep->mtx fs_reclaim &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &mm->mmap_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &mm->mmap_lock 
rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &mm->mmap_lock sb_writers#5 mount_lock irq_context: 0 &mm->mmap_lock sb_writers#5 tk_core.seq.seqcount irq_context: 0 &mm->mmap_lock sb_writers#5 &sb->s_type->i_lock_key irq_context: 0 &mm->mmap_lock sb_writers#5 &wb->list_lock irq_context: 0 &mm->mmap_lock sb_writers#5 &wb->list_lock &sb->s_type->i_lock_key irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 &newf->file_lock &newf->resize_wait irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 &kcov->lock irq_context: 0 &mm->mmap_lock &kcov->lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 key irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &c->lock irq_context: 0 &kcov->lock kcov_remote_lock irq_context: 0 &kcov->lock kcov_remote_lock pool_lock#2 irq_context: 0 pid_caches_mutex irq_context: 0 pid_caches_mutex slab_mutex irq_context: 0 pid_caches_mutex slab_mutex fs_reclaim irq_context: 0 pid_caches_mutex slab_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pid_caches_mutex slab_mutex pool_lock#2 irq_context: 0 pid_caches_mutex slab_mutex &c->lock irq_context: 0 pid_caches_mutex slab_mutex &n->list_lock irq_context: 0 pid_caches_mutex slab_mutex pcpu_alloc_mutex irq_context: 0 pid_caches_mutex slab_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 pid_caches_mutex slab_mutex &root->kernfs_rwsem irq_context: 0 pid_caches_mutex slab_mutex &k->list_lock irq_context: 0 pid_caches_mutex slab_mutex lock irq_context: 0 pid_caches_mutex slab_mutex lock kernfs_idr_lock irq_context: 0 pid_caches_mutex slab_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &type->s_umount_key#45 irq_context: 0 &type->s_umount_key#45 sb_lock irq_context: 0 &type->s_umount_key#45 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#18 irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem rcu_read_lock mount_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem rename_lock irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem rename_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem mount_lock irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem mount_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem mount_lock rcu_read_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem mount_lock &obj_hash[i].lock irq_context: 0 bt_proto_lock hci_sk_list.lock irq_context: 0 misc_mtx &base->lock irq_context: 0 misc_mtx &base->lock &obj_hash[i].lock irq_context: 0 (work_completion)(&(&data->open_timeout)->work) irq_context: 0 &data->open_mutex irq_context: 0 &data->open_mutex fs_reclaim irq_context: 0 &data->open_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &data->open_mutex pool_lock#2 irq_context: 0 &data->open_mutex &____s->seqcount irq_context: 0 &data->open_mutex &obj_hash[i].lock irq_context: 0 &data->open_mutex &x->wait#9 irq_context: 0 &data->open_mutex hci_index_ida.xa_lock irq_context: 0 &data->open_mutex 
cpu_hotplug_lock irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex pool_lock#2 irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex &wq->mutex irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 &data->open_mutex wq_pool_mutex irq_context: 0 &data->open_mutex wq_pool_mutex &wq->mutex irq_context: 0 &data->open_mutex &c->lock irq_context: 0 &data->open_mutex &n->list_lock irq_context: 0 &data->open_mutex &n->list_lock &c->lock irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex &c->lock irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex &____s->seqcount irq_context: 0 &data->open_mutex pin_fs_lock irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 &data->open_mutex &k->list_lock irq_context: 0 &data->open_mutex gdp_mutex irq_context: 0 &data->open_mutex gdp_mutex &k->list_lock irq_context: 0 &data->open_mutex gdp_mutex fs_reclaim irq_context: 0 &data->open_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &data->open_mutex gdp_mutex pool_lock#2 irq_context: 0 &data->open_mutex gdp_mutex &c->lock irq_context: 0 &data->open_mutex gdp_mutex lock irq_context: 0 &data->open_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 &data->open_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 &data->open_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &data->open_mutex lock irq_context: 0 &data->open_mutex lock kernfs_idr_lock irq_context: 0 &data->open_mutex &root->kernfs_rwsem irq_context: 0 &data->open_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &data->open_mutex bus_type_sem irq_context: 0 &data->open_mutex sysfs_symlink_target_lock irq_context: 0 &data->open_mutex rcu_read_lock pool_lock#2 irq_context: 0 &data->open_mutex &root->kernfs_rwsem irq_context: 0 &data->open_mutex &dev->power.lock irq_context: 0 &data->open_mutex dpm_list_mtx 
irq_context: 0 &data->open_mutex uevent_sock_mutex irq_context: 0 &data->open_mutex uevent_sock_mutex fs_reclaim irq_context: 0 &data->open_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &data->open_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 &data->open_mutex uevent_sock_mutex nl_table_lock irq_context: 0 &data->open_mutex uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 bt_proto_lock &c->lock irq_context: 0 &data->open_mutex &obj_hash[i].lock pool_lock irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex &n->list_lock irq_context: 0 &data->open_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &data->open_mutex uevent_sock_mutex uevent_sock_mutex.wait_lock irq_context: 0 &data->open_mutex uevent_sock_mutex &rq->__lock irq_context: 0 &data->open_mutex uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &data->open_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 &data->open_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 &data->open_mutex uevent_sock_mutex.wait_lock irq_context: 0 &data->open_mutex &p->pi_lock irq_context: 0 &data->open_mutex &p->pi_lock &rq->__lock irq_context: 0 &data->open_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &data->open_mutex subsys mutex#80 irq_context: 0 &data->open_mutex subsys mutex#80 &k->k_lock irq_context: 0 &data->open_mutex subsys mutex#80 &rq->__lock irq_context: 0 &data->open_mutex &dev->devres_lock irq_context: 0 &data->open_mutex triggers_list_lock irq_context: 0 &data->open_mutex leds_list_lock irq_context: 0 &data->open_mutex leds_list_lock &led_cdev->trigger_lock irq_context: 0 &data->open_mutex rfkill_global_mutex irq_context: 0 &data->open_mutex rfkill_global_mutex fs_reclaim irq_context: 0 &data->open_mutex rfkill_global_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &data->open_mutex rfkill_global_mutex pool_lock#2 irq_context: 0 &data->open_mutex rfkill_global_mutex &k->list_lock irq_context: 0 &data->open_mutex rfkill_global_mutex lock irq_context: 0 &data->open_mutex rfkill_global_mutex lock kernfs_idr_lock irq_context: 0 &data->open_mutex rfkill_global_mutex &root->kernfs_rwsem irq_context: 0 &data->open_mutex rfkill_global_mutex 
&root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &data->open_mutex rfkill_global_mutex bus_type_sem irq_context: 0 &data->open_mutex rfkill_global_mutex sysfs_symlink_target_lock irq_context: 0 &data->open_mutex rfkill_global_mutex &c->lock irq_context: 0 &data->open_mutex subsys mutex#80 &lock->wait_lock irq_context: 0 &data->open_mutex rfkill_global_mutex &n->list_lock irq_context: 0 &data->open_mutex rfkill_global_mutex &n->list_lock &c->lock irq_context: 0 &data->open_mutex rfkill_global_mutex &root->kernfs_rwsem irq_context: 0 &data->open_mutex rfkill_global_mutex &dev->power.lock irq_context: 0 &data->open_mutex rfkill_global_mutex dpm_list_mtx irq_context: 0 &data->open_mutex rfkill_global_mutex &rfkill->lock irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex fs_reclaim irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex nl_table_lock irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &data->open_mutex rfkill_global_mutex &rq->__lock irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex.wait_lock irq_context: 0 &data->open_mutex rfkill_global_mutex &p->pi_lock irq_context: 0 &data->open_mutex rfkill_global_mutex &obj_hash[i].lock irq_context: 0 &data->open_mutex rfkill_global_mutex &k->k_lock irq_context: 0 &data->open_mutex rfkill_global_mutex subsys mutex#40 irq_context: 0 &data->open_mutex rfkill_global_mutex subsys mutex#40 &k->k_lock irq_context: 0 &data->open_mutex rfkill_global_mutex triggers_list_lock irq_context: 0 &data->open_mutex rfkill_global_mutex leds_list_lock irq_context: 0 &data->open_mutex rfkill_global_mutex leds_list_lock &led_cdev->trigger_lock irq_context: 0 &data->open_mutex rfkill_global_mutex rcu_read_lock &pool->lock irq_context: 0 &data->open_mutex rfkill_global_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &data->open_mutex rfkill_global_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &data->open_mutex rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &data->open_mutex rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &data->open_mutex rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &data->open_mutex rfkill_global_mutex rfkill_global_mutex.wait_lock irq_context: 0 &data->open_mutex rfkill_global_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &data->open_mutex rfkill_global_mutex.wait_lock irq_context: 0 &data->open_mutex &rfkill->lock irq_context: 0 &data->open_mutex hci_dev_list_lock irq_context: 0 &data->open_mutex tk_core.seq.seqcount irq_context: 0 &data->open_mutex hci_sk_list.lock irq_context: 0 &data->open_mutex (pm_chain_head).rwsem irq_context: 0 &data->open_mutex rcu_read_lock &pool->lock/1 irq_context: 0 &data->open_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &data->open_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &data->open_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &data->open_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &data->open_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &data->open_mutex &rq->__lock irq_context: 0 (wq_completion)hci1 irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#9 irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock pool_lock#2 irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#10 irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock &hdev->req_wait_q irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &data->open_mutex &list->lock#11 irq_context: 0 &data->open_mutex &data->read_wait irq_context: 0 &list->lock#11 irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex &c->lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI slock-AF_BLUETOOTH-BTPROTO_HCI irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI sock_cookie_ida.xa_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &p->alloc_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI pool_lock#2 irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI tk_core.seq.seqcount irq_context: 0 
sk_lock-AF_BLUETOOTH-BTPROTO_HCI hci_sk_list.lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &obj_hash[i].lock irq_context: 0 slock-AF_BLUETOOTH-BTPROTO_HCI irq_context: 0 hci_dev_list_lock irq_context: 0 &data->read_wait irq_context: 0 bt_proto_lock &obj_hash[i].lock pool_lock irq_context: 0 &data->open_mutex uevent_sock_mutex &c->lock irq_context: 0 &data->open_mutex rfkill_global_mutex rcu_read_lock &rq->__lock irq_context: 0 &data->open_mutex rfkill_global_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rfkill_global_led_trigger_work) rfkill_global_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill_global_led_trigger_work) &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill_global_led_trigger_work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill_global_led_trigger_work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0 irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &c->lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &n->list_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &n->list_lock &c->lock irq_context: 0 (wq_completion)hci2 irq_context: 0 (wq_completion)hci3 irq_context: 0 lock pidmap_lock &n->list_lock irq_context: 0 lock pidmap_lock &n->list_lock &c->lock irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 &data->open_mutex rfkill_global_mutex lock kernfs_idr_lock &c->lock irq_context: 0 &data->open_mutex rfkill_global_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)hci4 irq_context: 0 (wq_completion)hci1#2 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) &list->lock#9 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) fs_reclaim irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) pool_lock#2 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) &list->lock#11 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) &data->read_wait irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#9 irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#10 irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) 
&hdev->req_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &hdev->req_wait_q irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &list->lock#9 irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &c->lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#9 irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#10 irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &hdev->req_wait_q irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#9 irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#10 irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock &hdev->req_wait_q irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci3 
(work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#9 irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#10 irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock &hdev->req_wait_q irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0#2 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) &list->lock#9 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) fs_reclaim irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) &list->lock#11 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) &data->read_wait irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &list->lock#9 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) lock#6 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) lock#6 kcov_remote_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) fs_reclaim irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) pool_lock#2 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) free_vmap_area_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) vmap_area_lock irq_context: 0 (wq_completion)hci1#2 
(work_completion)(&hdev->rx_work) &____s->seqcount irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) init_mm.page_table_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) kfence_freelist_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) (console_sem).lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 &data->open_mutex rfkill_global_mutex &____s->seqcount irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &rq->__lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock (&timer.timer) irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock &c->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) lock#6 &kcov->lock irq_context: 0 (wq_completion)hci2#2 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &list->lock#9 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) fs_reclaim irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &list->lock#11 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &data->read_wait irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock irq_context: 0 (wq_completion)hci5 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) 
&data->read_wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) &list->lock#9 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) fs_reclaim irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) &list->lock#11 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) &data->read_wait irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) &list->lock#9 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) fs_reclaim irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) &list->lock#11 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) &data->read_wait irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &list->lock#9 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) lock#6 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) lock#6 kcov_remote_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) fs_reclaim irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) (console_sem).lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci0#2 
(work_completion)(&hdev->rx_work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner console_owner_lock irq_context: hardirq log_wait.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &rq->__lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock (&timer.timer) irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) lock#6 &kcov->lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#9 irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock &c->lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#10 irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock &hdev->req_wait_q irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) &c->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &list->lock#9 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) lock#6 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) lock#6 kcov_remote_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) fs_reclaim irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) fs_reclaim mmu_notifier_invalidate_range_start 
irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &c->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) (console_sem).lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &list->lock#9 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) lock#6 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) lock#6 kcov_remote_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) fs_reclaim irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) free_vmap_area_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) vmap_area_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &____s->seqcount irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) init_mm.page_table_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) pool_lock#2 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &c->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) (console_sem).lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) lock#6 &kcov->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &list->lock#9 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) lock#6 irq_context: 0 (wq_completion)hci4#2 
(work_completion)(&hdev->rx_work) lock#6 kcov_remote_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) fs_reclaim irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) (console_sem).lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) console_owner_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) console_owner irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &rq->__lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock (&timer.timer) irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock pool_lock#2 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock (&timer.timer) irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) lock#6 &kcov->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &list->lock#9 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) fs_reclaim irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &list->lock#11 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &data->read_wait irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) console_owner_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) console_owner irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &rq->__lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) lock#6 &kcov->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) &c->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &c->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) console_owner_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) console_owner irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock (&timer.timer) irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &base->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &list->lock#9 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) lock#6 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) lock#6 kcov_remote_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) fs_reclaim irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) (console_sem).lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) console_owner_lock irq_context: 0 
(wq_completion)hci5#2 (work_completion)(&hdev->rx_work) console_owner irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &base->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &n->list_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &n->list_lock &c->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) console_owner_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) console_owner irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &rq->__lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock pool_lock#2 irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &c->lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &n->list_lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock (&timer.timer) irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) lock#6 &kcov->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) console_owner_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) console_owner irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &base->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) &c->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) &rq->__lock irq_context: 0 (wq_completion)hci0#2 
(work_completion)(&hdev->cmd_work) &c->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &base->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock &c->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &c->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &c->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &n->list_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &n->list_lock &c->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &c->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &base->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock &c->lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock &____s->seqcount irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock pool_lock#2 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &base->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock kfence_freelist_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &____s->seqcount irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) pool_lock#2 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &____s->seqcount irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock &____s->seqcount irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock pool_lock#2 irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock &____s->seqcount irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &c->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &____s->seqcount irq_context: 0 
(wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) pool_lock#2 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) &____s->seqcount irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) &____s->seqcount irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock hci_sk_list.lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) fs_reclaim irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) pool_lock#2 irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) hci_sk_list.lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &obj_hash[i].lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &c->lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &pcp->lock &zone->lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &____s->seqcount irq_context: 0 &pool->lock/1 &x->wait#10 irq_context: 0 &pool->lock/1 &x->wait#10 &p->pi_lock irq_context: 0 &pool->lock/1 &x->wait#10 &p->pi_lock &rq->__lock irq_context: 0 &pool->lock/1 &x->wait#10 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &hdev->req_lock irq_context: 0 &hdev->req_lock pool_lock#2 irq_context: 0 &hdev->req_lock &list->lock#10 irq_context: 0 &hdev->req_lock &list->lock#9 irq_context: 0 &hdev->req_lock rcu_read_lock &pool->lock/1 irq_context: 0 &hdev->req_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &hdev->req_lock &hdev->req_wait_q irq_context: 0 &hdev->req_lock &rq->__lock irq_context: 0 &hdev->req_lock &obj_hash[i].lock irq_context: 0 &hdev->req_lock &base->lock irq_context: 0 &hdev->req_lock &base->lock &obj_hash[i].lock irq_context: 0 &hdev->req_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_threadgroup_rwsem &sighand->siglock irq_context: 0 cgroup_threadgroup_rwsem &sighand->siglock &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &hdev->req_lock (&timer.timer) irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI slock-AF_BLUETOOTH-BTPROTO_HCI irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI hci_sk_list.lock irq_context: 0 &sb->s_type->i_mutex_key#10 
sk_lock-AF_BLUETOOTH-BTPROTO_HCI &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI sock_cookie_ida.xa_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI hci_sk_list.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI clock-AF_BLUETOOTH irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_BLUETOOTH-BTPROTO_HCI irq_context: 0 &sb->s_type->i_mutex_key#10 hci_dev_list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_BLUETOOTH irq_context: 0 &sb->s_type->i_mutex_key#10 wlock-AF_BLUETOOTH irq_context: 0 &sb->s_type->i_mutex_key#19 irq_context: 0 namespace_sem mnt_id_ida.xa_lock pool_lock#2 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) chan_list_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &c->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock pool_lock#2 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &x->wait#9 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->list_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) free_vmap_area_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) vmap_area_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) init_mm.page_table_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &c->lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock hci_sk_list.lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) fs_reclaim irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) hci_sk_list.lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) free_vmap_area_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) vmap_area_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &____s->seqcount irq_context: 0 (wq_completion)hci0#2 
(work_completion)(&hdev->rx_work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) init_mm.page_table_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock lock kernfs_idr_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock bus_type_sem irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock sysfs_symlink_target_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &dev->power.lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock dpm_list_mtx irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex pool_lock#2 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_wait.lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->k_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#80 irq_context: 0 
(wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#80 &k->k_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &____s->seqcount irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &list->lock#9 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock pool_lock#2 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock &rq->__lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->cmd_sync_work) irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->cmd_sync_work) &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->cmd_sync_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->cmd_sync_work) pool_lock#2 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock pool_lock#2 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock chan_list_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->ident_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &list->lock#12 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock 
rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->chan_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->chan_lock &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_sk_list.lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->tx_work) irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->tx_work) &list->lock#12 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->tx_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->tx_work) &list->lock#11 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->tx_work) &data->read_wait irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->tx_work) &list->lock#9 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&conn->pending_rx_work) irq_context: 0 (wq_completion)hci3#2 (work_completion)(&conn->pending_rx_work) &list->lock#13 irq_context: 0 rcu_read_lock &undo_list->lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock hci_sk_list.lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) fs_reclaim irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) hci_sk_list.lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) chan_list_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock 
fs_reclaim irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &x->wait#9 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) chan_list_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &n->list_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &n->list_lock &c->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &x->wait#9 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex dev_addr_sem &obj_hash[i].lock irq_context: 0 rtnl_mutex &nr_netdev_addr_lock_key irq_context: 0 rtnl_mutex listen_lock irq_context: 0 rtnl_mutex dev_addr_sem &c->lock irq_context: 0 pernet_ops_rwsem irq_context: 0 pernet_ops_rwsem 
stack_depot_init_mutex irq_context: 0 pernet_ops_rwsem crngs.lock irq_context: 0 pernet_ops_rwsem proc_inum_ida.xa_lock irq_context: 0 pernet_ops_rwsem fs_reclaim irq_context: 0 pernet_ops_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem pool_lock#2 irq_context: 0 pernet_ops_rwsem proc_subdir_lock irq_context: 0 pernet_ops_rwsem proc_subdir_lock irq_context: 0 pernet_ops_rwsem sysctl_lock irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex pcpu_lock irq_context: 0 pernet_ops_rwsem mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem &c->lock irq_context: 0 pernet_ops_rwsem &sb->s_type->i_lock_key#8 irq_context: 0 pernet_ops_rwsem &dir->lock irq_context: 0 pernet_ops_rwsem &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK k-slock-AF_NETLINK irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock rhashtable_bucket irq_context: 0 pernet_ops_rwsem k-slock-AF_NETLINK irq_context: 0 pernet_ops_rwsem nl_table_lock irq_context: 0 pernet_ops_rwsem nl_table_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem nl_table_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem nl_table_wait.lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &c->lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &n->list_lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem &n->list_lock irq_context: 0 pernet_ops_rwsem &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &c->lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &____s->seqcount irq_context: 0 pernet_ops_rwsem &____s->seqcount irq_context: 0 pernet_ops_rwsem nl_table_lock irq_context: 0 pernet_ops_rwsem &net->rules_mod_lock irq_context: 0 pernet_ops_rwsem proc_inum_ida.xa_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem percpu_counters_lock irq_context: 0 pernet_ops_rwsem batched_entropy_u32.lock irq_context: 0 pernet_ops_rwsem &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->list_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock lock kernfs_idr_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock bus_type_sem irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock sysfs_symlink_target_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &c->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &dev->power.lock irq_context: 0 
(wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock dpm_list_mtx irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &dentry->d_lock sysctl_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_wait.lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock sysctl_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->k_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#80 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#80 &k->k_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &list->lock#9 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 
(wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->cmd_sync_work) irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->cmd_sync_work) &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->cmd_sync_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock chan_list_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->ident_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &list->lock#12 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &rq->__lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->chan_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) 
&hdev->lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_sk_list.lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &____s->seqcount irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock pool_lock#2 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->tx_work) irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->tx_work) &list->lock#12 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->tx_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->tx_work) &list->lock#11 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->tx_work) &data->read_wait irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->tx_work) &list->lock#9 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&conn->pending_rx_work) irq_context: 0 (wq_completion)hci1#2 (work_completion)(&conn->pending_rx_work) &list->lock#13 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &____s->seqcount irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &____s->seqcount irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->list_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock lock kernfs_idr_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock bus_type_sem irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock sysfs_symlink_target_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &dev->power.lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock dpm_list_mtx irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_wait.lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->k_lock irq_context: 0 
(wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#80 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#80 &k->k_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &list->lock#9 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock chan_list_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->ident_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &list->lock#12 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->chan_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci4#2 
(work_completion)(&hdev->rx_work) &hdev->lock hci_sk_list.lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &c->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &c->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &____s->seqcount irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex pool_lock#2 irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->cmd_sync_work) irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->cmd_sync_work) &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->cmd_sync_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->tx_work) irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->tx_work) &list->lock#12 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->tx_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->tx_work) &list->lock#11 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->tx_work) &data->read_wait irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->tx_work) &list->lock#9 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&conn->pending_rx_work) irq_context: 0 (wq_completion)hci4#2 (work_completion)(&conn->pending_rx_work) &list->lock#13 irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock hci_sk_list.lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) fs_reclaim irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) hci_sk_list.lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &obj_hash[i].lock irq_context: 0 sb_writers#3 remove_cache_srcu irq_context: 0 sb_writers#3 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#3 remove_cache_srcu &c->lock irq_context: 0 sb_writers#3 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#3 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &rq->__lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem cache_list_lock irq_context: 0 pernet_ops_rwsem tk_core.seq.seqcount irq_context: 0 pernet_ops_rwsem rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem &k->list_lock irq_context: 0 pernet_ops_rwsem lock irq_context: 0 pernet_ops_rwsem lock kernfs_idr_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 pernet_ops_rwsem uevent_sock_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem 
uevent_sock_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem uevent_sock_mutex nl_table_lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex nl_table_wait.lock irq_context: 0 pernet_ops_rwsem &sn->pipefs_sb_lock irq_context: 0 pernet_ops_rwsem &s->s_inode_list_lock irq_context: 0 pernet_ops_rwsem nf_hook_mutex irq_context: 0 pernet_ops_rwsem nf_hook_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem nf_hook_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem nf_connlabels_lock irq_context: 0 pernet_ops_rwsem nf_ct_ecache_mutex irq_context: 0 pernet_ops_rwsem nf_log_mutex irq_context: 0 pernet_ops_rwsem ipvs->est_mutex irq_context: 0 pernet_ops_rwsem ipvs->est_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem ipvs->est_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pcpu_alloc_mutex irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 pernet_ops_rwsem &base->lock irq_context: 0 pernet_ops_rwsem &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex &c->lock irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem &hashinfo->lock#2 irq_context: 0 pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &ei->socket.wq.wait irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &n->list_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &n->list_lock &c->lock irq_context: 0 sb_writers#3 &n->list_lock irq_context: 0 sb_writers#3 &n->list_lock &c->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &n->list_lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) chan_list_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &x->wait#9 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->list_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock lock kernfs_idr_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock bus_type_sem irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock sysfs_symlink_target_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &c->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &dev->power.lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock dpm_list_mtx irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_wait.lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->k_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#80 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#80 &k->k_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &list->lock#9 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &c->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock chan_list_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->ident_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &list->lock#12 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->chan_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) 
&hdev->lock &____s->seqcount irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock pool_lock#2 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_sk_list.lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &rq->__lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem &pcp->lock &zone->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->tx_work) irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->tx_work) &list->lock#12 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->tx_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->tx_work) &list->lock#11 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->tx_work) &data->read_wait irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->tx_work) &list->lock#9 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&conn->pending_rx_work) irq_context: 0 (wq_completion)hci0#2 (work_completion)(&conn->pending_rx_work) &list->lock#13 irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &____s->seqcount irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock hci_sk_list.lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) fs_reclaim irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) hci_sk_list.lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) chan_list_lock irq_context: 0 (wq_completion)hci2#2 
(work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &x->wait#9 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->list_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock lock kernfs_idr_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock bus_type_sem irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock sysfs_symlink_target_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &c->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &dev->power.lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock dpm_list_mtx irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_wait.lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->k_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#80 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#80 &k->k_lock irq_context: 0 
(wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &list->lock#9 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->cmd_sync_work) irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->cmd_sync_work) &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->cmd_sync_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &____s->seqcount irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock pool_lock#2 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock 
fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock chan_list_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->ident_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &list->lock#12 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->chan_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->chan_lock &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->tx_work) irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->tx_work) &list->lock#12 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->tx_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->tx_work) &list->lock#11 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->tx_work) &data->read_wait irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->tx_work) &list->lock#9 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&conn->pending_rx_work) irq_context: 0 (wq_completion)hci2#2 (work_completion)(&conn->pending_rx_work) &list->lock#13 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_sk_list.lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex.wait_lock irq_context: 0 pernet_ops_rwsem &p->pi_lock irq_context: 0 pernet_ops_rwsem &this->receive_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex pcpu_alloc_mutex 
irq_context: 0 pernet_ops_rwsem rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex net_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &tn->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &x->wait#9 irq_context: 0 pernet_ops_rwsem rtnl_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &k->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex bus_type_sem irq_context: 0 pernet_ops_rwsem rtnl_mutex sysfs_symlink_target_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &dev->power.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dpm_list_mtx irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock hci_sk_list.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) fs_reclaim irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) tk_core.seq.seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) hci_sk_list.lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &rq->__lock irq_context: 0 &hdev->req_lock &obj_hash[i].lock pool_lock irq_context: 0 &hdev->req_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) chan_list_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)hci5#2 
(work_completion)(&hdev->rx_work) &hdev->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &x->wait#9 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->list_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock lock kernfs_idr_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock bus_type_sem irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock sysfs_symlink_target_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &c->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &n->list_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &dev->power.lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock dpm_list_mtx irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &____s->seqcount irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock pool_lock#2 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex uevent_sock_mutex.wait_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &pcp->lock &zone->lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex 
uevent_sock_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem rtnl_mutex subsys mutex#17 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 pernet_ops_rwsem rtnl_mutex &dir->lock#2 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_wait.lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->k_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#80 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#80 &k->k_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &list->lock#9 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci5#2 
(work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &____s->seqcount irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock &rq->__lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->cmd_sync_work) irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->cmd_sync_work) &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->cmd_sync_work) &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_hotplug_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_base_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex input_pool.lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim irq_context: 0 pernet_ops_rwsem rtnl_mutex batched_entropy_u32.lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex &tbl->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock chan_list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->ident_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &c->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &____s->seqcount irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &list->lock#12 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 irq_context: 0 pernet_ops_rwsem rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock 
&pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex proc_subdir_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->chan_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex proc_subdir_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->chan_lock &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock tk_core.seq.seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_sk_list.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &pnettable->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex smc_ib_devices.mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->tx_work) irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->tx_work) &list->lock#12 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->tx_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->tx_work) &list->lock#11 irq_context: 0 pernet_ops_rwsem rdma_nets.xa_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->tx_work) &data->read_wait irq_context: 0 pernet_ops_rwsem devices_rwsem irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->tx_work) &list->lock#9 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&conn->pending_rx_work) irq_context: 0 (wq_completion)hci5#2 (work_completion)(&conn->pending_rx_work) &list->lock#13 irq_context: 0 pernet_ops_rwsem hwsim_netgroup_ida.xa_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex stack_depot_init_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#4 &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex rcu_read_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#4 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem batched_entropy_u8.lock irq_context: 0 pernet_ops_rwsem kfence_freelist_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem batched_entropy_u32.lock crngs.lock irq_context: 0 pernet_ops_rwsem fs_reclaim &rq->__lock irq_context: 0 fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem nf_hook_mutex &c->lock irq_context: 0 pernet_ops_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rdma_nets.xa_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex failover_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex 
rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 rtnl_mutex dev_addr_sem &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex batched_entropy_u32.lock crngs.lock irq_context: 0 pernet_ops_rwsem nl_table_lock nl_table_wait.lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex dev_addr_sem &net->ipv6.fib6_gc_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem &net->ipv6.fib6_gc_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex dev_addr_sem &net->ipv6.fib6_gc_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex pcpu_alloc_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-slock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &h->lhash2[i].lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock 
wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &c->lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &wq->mutex irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 pernet_ops_rwsem wq_pool_mutex irq_context: 0 pernet_ops_rwsem wq_pool_mutex &wq->mutex irq_context: 0 pernet_ops_rwsem pcpu_lock irq_context: 0 pernet_ops_rwsem &list->lock#4 irq_context: 0 pernet_ops_rwsem &dir->lock#2 irq_context: 0 pernet_ops_rwsem ptype_lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock rhashtable_bucket irq_context: 0 pernet_ops_rwsem k-clock-AF_TIPC irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC k-slock-AF_TIPC irq_context: 0 pernet_ops_rwsem k-slock-AF_TIPC irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 
irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC k-slock-AF_RXRPC irq_context: 0 pernet_ops_rwsem k-slock-AF_RXRPC irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex crngs.lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &sb->s_type->i_lock_key#8 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &dir->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 &table->hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 &table->hash[i].lock k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 &table->hash[i].lock &table->hash2[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-slock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex kthread_create_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &x->wait irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &x->wait#22 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &c->lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &local->services_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC fs_reclaim irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC pool_lock#2 irq_context: 0 pernet_ops_rwsem &rxnet->conn_lock irq_context: 0 pernet_ops_rwsem &call->waitq irq_context: 0 pernet_ops_rwsem &rx->call_lock irq_context: 0 pernet_ops_rwsem &rxnet->call_lock irq_context: 0 pernet_ops_rwsem net_rwsem irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock sysctl_lock 
irq_context: 0 sb_writers#3 &____s->seqcount#11 irq_context: 0 sb_writers#3 &(&net->ipv4.ping_group_range.lock)->lock irq_context: 0 sb_writers#3 &(&net->ipv4.ping_group_range.lock)->lock &____s->seqcount#11 irq_context: 0 misc_mtx &dir->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 &x->wait#22 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex &r->consumer_lock irq_context: 0 rtnl_mutex &r->consumer_lock &r->producer_lock irq_context: 0 rtnl_mutex failover_lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER pool_lock#2 irq_context: 0 rtnl_mutex &mm->mmap_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex quarantine_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock rcu_node_0 irq_context: softirq (&rxnet->peer_keepalive_timer) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex 
&idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock batched_entropy_u32.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &n->lock irq_context: 0 rtnl_mutex &n->lock &(&n->ha_lock)->lock irq_context: 0 rtnl_mutex &n->lock &(&n->ha_lock)->lock &____s->seqcount#9 irq_context: 0 rtnl_mutex &tbl->lock &n->lock irq_context: 0 rtnl_mutex rcu_read_lock lock#8 irq_context: 0 rtnl_mutex rcu_read_lock id_table_lock irq_context: 0 rtnl_mutex &n->lock irq_context: 0 rtnl_mutex &n->lock &____s->seqcount#9 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex &c->lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex &c->lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex &____s->seqcount irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock 
&ndev->lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex dev_addr_sem &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock &n->lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock &n->lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock nl_table_lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock nl_table_wait.lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock rcu_read_lock lock#8 irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock rcu_read_lock id_table_lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock &dir->lock#2 irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock krc.lock irq_context: 0 rtnl_mutex _xmit_ETHER pool_lock#2 irq_context: 0 rtnl_mutex &ndev->lock pool_lock#2 irq_context: 0 rtnl_mutex &ndev->lock &dir->lock#2 irq_context: 0 rtnl_mutex &ndev->lock pcpu_lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock irq_context: 0 rtnl_mutex &ndev->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock pool_lock#2 irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock nl_table_lock irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock nl_table_wait.lock irq_context: 0 rtnl_mutex &ndev->lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &ndev->lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &ndev->lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex &ndev->lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &ndev->lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &ndev->lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex pcpu_lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex 
fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &ht->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &ht->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex (inetaddr_validator_chain).rwsem &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock &c->lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock &____s->seqcount irq_context: 0 rtnl_mutex &ndev->lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER &____s->seqcount irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex &rq->__lock irq_context: 0 rtnl_mutex pcpu_alloc_mutex &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key pool_lock#2 irq_context: 0 rtnl_mutex (switchdev_blocking_notif_chain).rwsem irq_context: 0 rtnl_mutex &br->hash_lock irq_context: 0 rtnl_mutex &br->hash_lock &____s->seqcount irq_context: 0 rtnl_mutex &br->hash_lock pool_lock#2 irq_context: 0 rtnl_mutex &br->hash_lock &c->lock irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock rhashtable_bucket 
irq_context: 0 rtnl_mutex &br->hash_lock nl_table_lock irq_context: 0 rtnl_mutex &br->hash_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &br->hash_lock nl_table_wait.lock irq_context: 0 rtnl_mutex rcu_read_lock rhashtable_bucket irq_context: 0 rtnl_mutex nf_hook_mutex irq_context: 0 rtnl_mutex nf_hook_mutex nf_hook_mutex.wait_lock irq_context: 0 rtnl_mutex nf_hook_mutex &rq->__lock irq_context: 0 rtnl_mutex nf_hook_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex nf_hook_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex nf_hook_mutex rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex nf_hook_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex.wait_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex &p->pi_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex nf_hook_mutex fs_reclaim irq_context: 0 rtnl_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex nf_hook_mutex pool_lock#2 irq_context: 0 rtnl_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 rtnl_mutex nf_hook_mutex &____s->seqcount irq_context: 0 rtnl_mutex nf_hook_mutex rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex nf_hook_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_addr_sem &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &tb->tb6_lock quarantine_lock irq_context: 0 rtnl_mutex quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 rtnl_mutex j1939_netdev_lock irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 &rq->__lock irq_context: 0 rtnl_mutex nf_hook_mutex &c->lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &c->lock irq_context: 0 rtnl_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock &obj_hash[i].lock pool_lock irq_context: softirq &(&idev->mc_ifc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock 
fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex.wait_lock irq_context: 0 (wq_completion)events (linkwatch_work).work &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2 irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2 pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &pcp->lock &zone->lock irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex uevent_sock_mutex &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock &____s->seqcount irq_context: 0 (wq_completion)mld 
(work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex &bat_priv->tvlv.handler_list_lock irq_context: 0 rtnl_mutex &bat_priv->tvlv.handler_list_lock pool_lock#2 irq_context: 0 rtnl_mutex &bat_priv->tvlv.handler_list_lock &c->lock irq_context: 0 rtnl_mutex &bat_priv->tvlv.container_list_lock irq_context: 0 rtnl_mutex &idev->mc_lock &batadv_netdev_addr_lock_key irq_context: 0 rtnl_mutex &idev->mc_lock &batadv_netdev_addr_lock_key pool_lock#2 irq_context: 0 rtnl_mutex &bat_priv->softif_vlan_list_lock irq_context: 0 rtnl_mutex &bat_priv->softif_vlan_list_lock pool_lock#2 irq_context: 0 rtnl_mutex key#16 irq_context: 0 rtnl_mutex &bat_priv->tt.changes_list_lock irq_context: 0 &p->lock pgd_lock irq_context: 0 &p->lock rcu_read_lock pool_lock#2 irq_context: 0 &p->lock &obj_hash[i].lock irq_context: 0 &p->lock key irq_context: 0 &p->lock pcpu_lock irq_context: 0 &p->lock percpu_counters_lock irq_context: 0 &p->lock &cfs_rq->removed.lock irq_context: softirq &(&bat_priv->nc.work)->timer irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) key#17 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) key#18 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &base->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex pgd_lock irq_context: 0 rtnl_mutex key irq_context: 0 rtnl_mutex percpu_counters_lock irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 rtnl_mutex kernfs_idr_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex 
rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex cpu_hotplug_lock xps_map_mutex irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.expedited_wq irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex tk_core.seq.seqcount irq_context: 0 rtnl_mutex &wq->mutex irq_context: 0 rtnl_mutex &wq->mutex &pool->lock irq_context: 0 rtnl_mutex wq_pool_mutex &wq->mutex &pool->lock irq_context: 0 rtnl_mutex wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 rtnl_mutex init_lock irq_context: 0 rtnl_mutex init_lock slab_mutex irq_context: 0 rtnl_mutex init_lock slab_mutex fs_reclaim irq_context: 0 rtnl_mutex init_lock slab_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex init_lock slab_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 rtnl_mutex init_lock slab_mutex &c->lock irq_context: 0 rtnl_mutex init_lock slab_mutex &____s->seqcount irq_context: 0 rtnl_mutex init_lock slab_mutex pool_lock#2 irq_context: 0 rtnl_mutex init_lock slab_mutex &n->list_lock irq_context: 0 rtnl_mutex init_lock slab_mutex pcpu_alloc_mutex irq_context: 0 rtnl_mutex 
init_lock slab_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 rtnl_mutex init_lock slab_mutex &root->kernfs_rwsem irq_context: 0 rtnl_mutex init_lock slab_mutex &k->list_lock irq_context: 0 rtnl_mutex init_lock slab_mutex lock irq_context: 0 rtnl_mutex init_lock slab_mutex lock kernfs_idr_lock irq_context: 0 rtnl_mutex init_lock slab_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex init_lock fs_reclaim irq_context: 0 rtnl_mutex init_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex init_lock &zone->lock irq_context: 0 rtnl_mutex init_lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex init_lock &____s->seqcount irq_context: 0 rtnl_mutex init_lock pool_lock#2 irq_context: 0 rtnl_mutex init_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex init_lock &base->lock irq_context: 0 rtnl_mutex init_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex init_lock crngs.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &____s->seqcount irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rcu_state.expedited_wq irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex lweventlist_lock &c->lock irq_context: 0 rtnl_mutex lweventlist_lock &n->list_lock irq_context: 0 rtnl_mutex lweventlist_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) &rq->__lock irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem 
kernfs_idr_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 rtnl_mutex &meta->lock irq_context: 0 rtnl_mutex deferred_lock irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events deferred_process_work irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex &pool->lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex target_list_lock irq_context: 0 rtnl_mutex rcu_read_lock _xmit_ETHER irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &br->lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock rcu_read_lock rhashtable_bucket irq_context: 0 rtnl_mutex &br->lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex &br->lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &br->lock &br->hash_lock pool_lock#2 irq_context: 0 rtnl_mutex &br->lock &br->hash_lock nl_table_lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock nl_table_wait.lock irq_context: 0 rtnl_mutex &br->lock lweventlist_lock irq_context: 0 rtnl_mutex &br->lock lweventlist_lock pool_lock#2 irq_context: 0 rtnl_mutex &br->lock lweventlist_lock &dir->lock#2 irq_context: 0 rtnl_mutex &pn->hash_lock irq_context: 0 rtnl_mutex &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock irq_context: 0 rtnl_mutex &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 rtnl_mutex &net->ipv6.fib6_gc_lock &obj_hash[i].lock irq_context: 0 fill_pool_map-wait-type-override &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: hardirq log_wait.lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override batched_entropy_u8.lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override kfence_freelist_lock irq_context: 0 (wq_completion)events 
deferred_process_work rtnl_mutex deferred_lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex (switchdev_blocking_notif_chain).rwsem irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex pool_lock#2 irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex &dir->lock#2 irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex.wait_lock irq_context: 0 (wq_completion)events deferred_process_work &p->pi_lock irq_context: 0 (wq_completion)events deferred_process_work &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events deferred_process_work &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: softirq &(&bat_priv->mcast.work)->timer irq_context: softirq &(&bat_priv->mcast.work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&bat_priv->mcast.work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&bat_priv->mcast.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&bat_priv->mcast.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: softirq &(&bat_priv->mcast.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&bat_priv->mcast.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&bat_priv->mcast.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock key#16 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock &bat_priv->tt.changes_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock &bat_priv->tvlv.container_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &base->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock &c->lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock &____s->seqcount irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock 
&rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex &meta->lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex kfence_freelist_lock irq_context: softirq (&wq_watchdog_timer) &obj_hash[i].lock irq_context: softirq (&wq_watchdog_timer) &base->lock irq_context: softirq (&wq_watchdog_timer) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond0 irq_context: 0 (wq_completion)bond0 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond0 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond0 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond0 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq &(&slave->notify_work)->timer irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex &tn->lock &rq->__lock irq_context: 0 rtnl_mutex _xmit_ETHER &____s->seqcount irq_context: 0 (wq_completion)bond0#2 irq_context: 0 (wq_completion)bond0#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond0#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond0#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond0#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond0#3 irq_context: 0 (wq_completion)bond0#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond0#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond0#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond0#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_node_0 irq_context: 0 (wq_completion)bond0#4 irq_context: 0 (wq_completion)bond0#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond0#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond0#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond0#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond0#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)gid-cache-wq 
(work_completion)(&ndev_work->work) devices_rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: hardirq &vb->stop_update_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: hardirq &vb->stop_update_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &rcu_state.expedited_wq irq_context: 0 rtnl_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond0#5 irq_context: 0 (wq_completion)bond0#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond0#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond0#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond0#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key irq_context: 0 rtnl_mutex team->team_lock_key fs_reclaim irq_context: 0 rtnl_mutex team->team_lock_key fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex team->team_lock_key pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key netpoll_srcu irq_context: 0 rtnl_mutex team->team_lock_key net_rwsem irq_context: 0 rtnl_mutex team->team_lock_key net_rwsem &list->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key &tn->lock irq_context: 0 rtnl_mutex team->team_lock_key _xmit_ETHER irq_context: 0 rtnl_mutex team->team_lock_key &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key &c->lock irq_context: 0 rtnl_mutex team->team_lock_key input_pool.lock irq_context: 0 rtnl_mutex team->team_lock_key rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key nl_table_lock irq_context: 0 rtnl_mutex team->team_lock_key nl_table_wait.lock irq_context: 0 rtnl_mutex team->team_lock_key rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex team->team_lock_key rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key &in_dev->mc_tomb_lock irq_context: 0 rtnl_mutex team->team_lock_key &im->lock irq_context: 0 rtnl_mutex team->team_lock_key _xmit_ETHER pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key cbs_list_lock irq_context: 0 rtnl_mutex team->team_lock_key &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key sysfs_symlink_target_lock irq_context: 0 rtnl_mutex team->team_lock_key lock irq_context: 0 rtnl_mutex team->team_lock_key lock kernfs_idr_lock irq_context: 0 rtnl_mutex team->team_lock_key &root->kernfs_rwsem irq_context: 0 rtnl_mutex 
team->team_lock_key &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex team->team_lock_key remove_cache_srcu irq_context: 0 rtnl_mutex team->team_lock_key remove_cache_srcu quarantine_lock irq_context: 0 rtnl_mutex team->team_lock_key &____s->seqcount irq_context: 0 rtnl_mutex team->team_lock_key rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key (console_sem).lock irq_context: 0 rtnl_mutex team->team_lock_key console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex team->team_lock_key console_lock console_srcu console_owner irq_context: 0 rtnl_mutex team->team_lock_key console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex team->team_lock_key console_lock console_srcu console_owner console_owner_lock irq_context: 0 rtnl_mutex team->team_lock_key &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#2 irq_context: 0 rtnl_mutex team->team_lock_key#2 fs_reclaim irq_context: 0 rtnl_mutex team->team_lock_key#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex team->team_lock_key#2 &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#2 netpoll_srcu irq_context: 0 rtnl_mutex team->team_lock_key#2 net_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#2 net_rwsem &list->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#2 &tn->lock irq_context: 0 rtnl_mutex team->team_lock_key#2 _xmit_ETHER irq_context: 0 rtnl_mutex team->team_lock_key#2 &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#2 &____s->seqcount irq_context: 0 rtnl_mutex team->team_lock_key#2 pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#2 input_pool.lock irq_context: 0 rtnl_mutex team->team_lock_key#2 rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#2 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#2 nl_table_lock irq_context: 0 rtnl_mutex team->team_lock_key#2 nl_table_wait.lock irq_context: 0 rtnl_mutex team->team_lock_key#2 rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex team->team_lock_key#2 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#2 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#2 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#2 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#2 &in_dev->mc_tomb_lock irq_context: 0 rtnl_mutex team->team_lock_key#2 &im->lock irq_context: 0 rtnl_mutex team->team_lock_key#2 _xmit_ETHER &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#2 _xmit_ETHER &____s->seqcount irq_context: 0 rtnl_mutex team->team_lock_key#2 cbs_list_lock irq_context: 0 rtnl_mutex team->team_lock_key#2 &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#2 sysfs_symlink_target_lock irq_context: 0 rtnl_mutex team->team_lock_key#2 lock irq_context: 0 rtnl_mutex team->team_lock_key#2 lock kernfs_idr_lock irq_context: 0 rtnl_mutex team->team_lock_key#2 &root->kernfs_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#2 &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#2 lweventlist_lock irq_context: 0 rtnl_mutex team->team_lock_key#2 lweventlist_lock pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#2 lweventlist_lock 
&dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#2 (console_sem).lock irq_context: 0 rtnl_mutex team->team_lock_key#2 console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex team->team_lock_key#2 console_lock console_srcu console_owner irq_context: 0 rtnl_mutex team->team_lock_key#2 console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex team->team_lock_key#2 console_lock console_srcu console_owner console_owner_lock irq_context: 0 rtnl_mutex team->team_lock_key#2 &rq->__lock irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex team->team_lock_key lweventlist_lock irq_context: 0 rtnl_mutex team->team_lock_key lweventlist_lock &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#2 &pcp->lock &zone->lock irq_context: 0 rtnl_mutex team->team_lock_key#2 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond0#6 irq_context: 0 (wq_completion)bond0#6 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond0#6 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond0#6 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond0#6 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#3 irq_context: 0 rtnl_mutex team->team_lock_key#3 fs_reclaim irq_context: 0 rtnl_mutex team->team_lock_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex team->team_lock_key#3 netpoll_srcu irq_context: 0 rtnl_mutex team->team_lock_key#3 net_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#3 net_rwsem &list->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#3 &tn->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 _xmit_ETHER irq_context: 0 rtnl_mutex team->team_lock_key#3 &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#3 input_pool.lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &____s->seqcount irq_context: 0 rtnl_mutex team->team_lock_key#3 pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#3 nl_table_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 nl_table_wait.lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#3 &in_dev->mc_tomb_lock irq_context: 0 rtnl_mutex 
team->team_lock_key#3 &im->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 cbs_list_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 sysfs_symlink_target_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 lock irq_context: 0 rtnl_mutex team->team_lock_key#3 lock kernfs_idr_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 lock kernfs_idr_lock pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#3 &root->kernfs_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#3 &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#3 lweventlist_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 lweventlist_lock &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#3 (console_sem).lock irq_context: 0 rtnl_mutex team->team_lock_key#3 console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 console_lock console_srcu console_owner irq_context: 0 rtnl_mutex team->team_lock_key#3 console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex team->team_lock_key#3 console_lock console_srcu console_owner console_owner_lock irq_context: softirq &(&bat_priv->orig_work)->timer irq_context: softirq &(&bat_priv->orig_work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&bat_priv->orig_work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&bat_priv->orig_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) key#19 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &base->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq &(&bat_priv->orig_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&bat_priv->orig_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 irq_context: 0 rtnl_mutex team->team_lock_key#4 fs_reclaim irq_context: 0 rtnl_mutex team->team_lock_key#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex team->team_lock_key#4 netpoll_srcu irq_context: 0 rtnl_mutex team->team_lock_key#4 net_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#4 net_rwsem &list->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#4 &tn->lock irq_context: 0 rtnl_mutex team->team_lock_key#4 _xmit_ETHER irq_context: 0 rtnl_mutex team->team_lock_key#4 &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#4 input_pool.lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#4 nl_table_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 nl_table_wait.lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock 
&pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &in_dev->mc_tomb_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &im->lock irq_context: 0 rtnl_mutex team->team_lock_key#4 cbs_list_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#4 sysfs_symlink_target_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 lock irq_context: 0 rtnl_mutex team->team_lock_key#4 lock kernfs_idr_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &root->kernfs_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#4 &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#4 &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &____s->seqcount irq_context: 0 rtnl_mutex team->team_lock_key#4 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#4 fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 lweventlist_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 lweventlist_lock &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#4 (console_sem).lock irq_context: 0 rtnl_mutex team->team_lock_key#4 console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 console_lock console_srcu console_owner irq_context: 0 rtnl_mutex team->team_lock_key#4 console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex team->team_lock_key#4 console_lock console_srcu console_owner console_owner_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 quarantine_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 remove_cache_srcu irq_context: 0 rtnl_mutex team->team_lock_key#3 remove_cache_srcu quarantine_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 remove_cache_srcu &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 remove_cache_srcu &n->list_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#4 &pcp->lock &zone->lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex team->team_lock_key#4 &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bat_events 
(work_completion)(&(&bat_priv->nc.work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 irq_context: 0 rtnl_mutex team->team_lock_key#5 fs_reclaim irq_context: 0 rtnl_mutex team->team_lock_key#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex team->team_lock_key#5 netpoll_srcu irq_context: 0 rtnl_mutex team->team_lock_key#5 net_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#5 net_rwsem &list->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#5 &tn->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 _xmit_ETHER irq_context: 0 rtnl_mutex team->team_lock_key#5 &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#5 input_pool.lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &____s->seqcount irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#5 nl_table_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 nl_table_wait.lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#5 &in_dev->mc_tomb_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &im->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 _xmit_ETHER &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 _xmit_ETHER &____s->seqcount irq_context: 0 rtnl_mutex team->team_lock_key#5 cbs_list_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &n->list_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &n->list_lock &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 sysfs_symlink_target_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 lock irq_context: 0 rtnl_mutex team->team_lock_key#5 lock kernfs_idr_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &root->kernfs_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#5 &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#5 lweventlist_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 lweventlist_lock &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#5 (console_sem).lock irq_context: 0 rtnl_mutex team->team_lock_key#5 console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 console_lock console_srcu console_owner irq_context: 0 rtnl_mutex team->team_lock_key#5 console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex team->team_lock_key#5 console_lock console_srcu console_owner console_owner_lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex crngs.lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex pool_lock#2 irq_context: 0 rtnl_mutex ptype_lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex batched_entropy_u8.lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock irq_context: 0 
rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock &base->lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock &base->lock &obj_hash[i].lock irq_context: softirq drivers/net/wireguard/ratelimiter.c:20 irq_context: softirq drivers/net/wireguard/ratelimiter.c:20 rcu_read_lock &pool->lock irq_context: softirq drivers/net/wireguard/ratelimiter.c:20 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq drivers/net/wireguard/ratelimiter.c:20 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq drivers/net/wireguard/ratelimiter.c:20 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq drivers/net/wireguard/ratelimiter.c:20 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (gc_work).work irq_context: 0 (wq_completion)events_power_efficient (gc_work).work tk_core.seq.seqcount irq_context: 0 (wq_completion)events_power_efficient (gc_work).work "ratelimiter_table_lock" irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &base->lock irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#6 irq_context: 0 rtnl_mutex team->team_lock_key#6 fs_reclaim irq_context: 0 rtnl_mutex team->team_lock_key#6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex team->team_lock_key#6 netpoll_srcu irq_context: 0 rtnl_mutex team->team_lock_key#6 net_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#6 net_rwsem &list->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#6 &tn->lock irq_context: 0 rtnl_mutex team->team_lock_key#6 _xmit_ETHER irq_context: 0 rtnl_mutex team->team_lock_key#6 &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#6 input_pool.lock irq_context: 0 rtnl_mutex team->team_lock_key#6 rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#6 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#6 nl_table_lock irq_context: 0 rtnl_mutex team->team_lock_key#6 nl_table_wait.lock irq_context: 0 rtnl_mutex team->team_lock_key#6 rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex team->team_lock_key#6 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#6 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#6 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#6 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#6 &in_dev->mc_tomb_lock irq_context: 0 rtnl_mutex team->team_lock_key#6 &im->lock irq_context: 0 rtnl_mutex team->team_lock_key#6 _xmit_ETHER &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#6 _xmit_ETHER &____s->seqcount irq_context: 0 rtnl_mutex team->team_lock_key#6 cbs_list_lock irq_context: 0 rtnl_mutex team->team_lock_key#6 &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#6 sysfs_symlink_target_lock irq_context: 0 rtnl_mutex team->team_lock_key#6 lock irq_context: 0 rtnl_mutex team->team_lock_key#6 lock kernfs_idr_lock irq_context: 0 rtnl_mutex team->team_lock_key#6 
&root->kernfs_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#6 &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#6 &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#6 &____s->seqcount irq_context: 0 rtnl_mutex team->team_lock_key#6 pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#6 lweventlist_lock irq_context: 0 rtnl_mutex team->team_lock_key#6 lweventlist_lock &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#6 (console_sem).lock irq_context: 0 rtnl_mutex team->team_lock_key#6 console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex team->team_lock_key#6 console_lock console_srcu console_owner irq_context: 0 rtnl_mutex team->team_lock_key#6 console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex team->team_lock_key#6 console_lock console_srcu console_owner console_owner_lock irq_context: 0 rtnl_mutex team->team_lock_key#6 &rq->__lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 rtnl_mutex proc_inum_ida.xa_lock pool_lock#2 irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#6 &n->list_lock irq_context: 0 rtnl_mutex team->team_lock_key#6 &n->list_lock &c->lock irq_context: 0 rtnl_mutex lweventlist_lock &____s->seqcount irq_context: softirq &(&hdev->cmd_timer)->timer irq_context: softirq &(&hdev->cmd_timer)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&hdev->cmd_timer)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&hdev->cmd_timer)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&hdev->cmd_timer)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&hdev->cmd_timer)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) (console_sem).lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex batched_entropy_u8.lock crngs.lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex &____s->seqcount irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) (console_sem).lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu 
console_owner irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_node_0 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) (console_sem).lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_node_0 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) &rq->__lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 
(wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex _xmit_NONE irq_context: 0 rtnl_mutex lock#9 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) (console_sem).lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) (console_sem).lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) console_owner_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) (console_sem).lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) console_owner_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) console_owner irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex console_owner_lock irq_context: 0 rtnl_mutex console_owner irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci0#2 
(work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#12 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock batched_entropy_u32.lock irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock batched_entropy_u32.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &hsr->list_lock irq_context: 0 rtnl_mutex pin_fs_lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock &dentry->d_lock 
irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 (console_sem).lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 console_lock console_srcu console_owner irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 console_lock console_srcu console_owner console_owner_lock irq_context: 0 rtnl_mutex rcu_read_lock mount_lock irq_context: 0 rtnl_mutex rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 rtnl_mutex mount_lock irq_context: 0 rtnl_mutex mount_lock mount_lock.seqcount irq_context: 0 rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond0#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond0#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond0#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond0#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond0#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond0#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond0#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond0#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond0#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond0#6 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond0#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond0#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond0#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond0#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond0#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex gdp_mutex fs_reclaim irq_context: 0 rtnl_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex gdp_mutex pool_lock#2 irq_context: 0 rtnl_mutex gdp_mutex lock irq_context: 0 rtnl_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 
rtnl_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex gdp_mutex kobj_ns_type_lock irq_context: 0 rtnl_mutex &k->k_lock irq_context: 0 rtnl_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex gdp_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &dev->tx_global_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &dev->tx_global_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key fs_reclaim irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key nl_table_wait.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key net_rwsem irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key net_rwsem &list->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key &tn->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 fs_reclaim irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 nl_table_wait.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 net_rwsem irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 net_rwsem &list->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 &tn->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 
&____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 fs_reclaim irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 nl_table_wait.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 net_rwsem irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 net_rwsem &list->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 &tn->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 fs_reclaim irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 nl_table_wait.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 net_rwsem irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 net_rwsem &list->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 &tn->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 fs_reclaim irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)events 
(linkwatch_work).work rtnl_mutex team->team_lock_key#5 &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 nl_table_wait.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 net_rwsem irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 net_rwsem &list->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 &tn->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 fs_reclaim irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 nl_table_wait.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 net_rwsem irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 net_rwsem &list->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 &tn->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &meta->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_node_0 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &rcu_state.expedited_wq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex uevent_sock_mutex quarantine_lock irq_context: 0 rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key irq_context: 0 rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key &____s->seqcount irq_context: 0 rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key irq_context: 0 rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key pool_lock#2 irq_context: softirq (&app->join_timer) irq_context: softirq (&app->join_timer) &app->lock irq_context: softirq (&app->join_timer) &list->lock#14 irq_context: softirq (&app->join_timer) batched_entropy_u32.lock irq_context: softirq 
(&app->join_timer) &obj_hash[i].lock irq_context: softirq (&app->join_timer) &base->lock irq_context: softirq (&app->join_timer) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex gdp_mutex &c->lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: softirq &(&forw_packet_aggr->delayed_work)->timer irq_context: softirq &(&forw_packet_aggr->delayed_work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&forw_packet_aggr->delayed_work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&forw_packet_aggr->delayed_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&forw_packet_aggr->delayed_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&forw_packet_aggr->delayed_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock &base->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &bat_priv->forw_bat_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) pool_lock#2 irq_context: softirq (&app->join_timer)#2 irq_context: softirq (&app->join_timer)#2 &app->lock#2 irq_context: softirq (&app->join_timer)#2 &list->lock#15 irq_context: softirq (&app->join_timer)#2 &app->lock#2 batched_entropy_u32.lock irq_context: softirq (&app->join_timer)#2 &app->lock#2 &obj_hash[i].lock irq_context: softirq 
(&app->join_timer)#2 &app->lock#2 &base->lock irq_context: softirq (&app->join_timer)#2 &app->lock#2 &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3 irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3 pool_lock#2 irq_context: 0 rtnl_mutex &xa->xa_lock#13 irq_context: 0 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#3/1 irq_context: 0 rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key &____s->seqcount irq_context: 0 rtnl_mutex rcu_read_lock &tap_major->minor_lock irq_context: 0 rtnl_mutex rcu_read_lock &tap_major->minor_lock pool_lock#2 irq_context: 0 rtnl_mutex req_lock irq_context: 0 rtnl_mutex &x->wait#11 irq_context: 0 rtnl_mutex subsys mutex#81 irq_context: 0 rtnl_mutex subsys mutex#81 &k->k_lock irq_context: 0 kn->active#51 fs_reclaim irq_context: 0 kn->active#51 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#51 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#51 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#51 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock irq_context: 0 (wq_completion)bond0 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond0 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond0 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond0 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond0 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond0#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond0#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond0#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond0#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond0#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond0#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond0#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond0#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond0#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond0#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 kn->active#52 fs_reclaim irq_context: 0 kn->active#52 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#52 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#52 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#52 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock fs_reclaim irq_context: 0 
sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock nsim_bus_dev_ids.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &x->wait#9 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock bus_type_sem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock sysfs_symlink_target_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock dpm_list_mtx irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait 
&ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex device_links_srcu irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex fwnode_link_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex device_links_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex sysfs_symlink_target_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex devlinks.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key crngs.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key devlinks.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex 
&devlink->lock_key nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &xa->xa_lock#14 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &xa->xa_lock#14 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &xa->xa_lock#14 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key pcpu_alloc_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &base->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key pin_fs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex 
&devlink->lock_key &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key batched_entropy_u32.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex kernfs_idr_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex kernfs_idr_lock pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock &pcp->lock &zone->lock irq_context: 0 rtnl_mutex &idev->mc_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock &data->fib_event_queue_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock rcu_read_lock 
&pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock &tb->tb6_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock &tb->tb6_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock &tb->tb6_lock &data->fib_event_queue_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &(&fn_net->fib_chain)->lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_event_queue_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) 
&data->fib_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock (&timer.timer) irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &devlink_port->type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key stack_depot_init_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex bpf_devs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex bpf_devs_lock fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex bpf_devs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex bpf_devs_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex bpf_devs_lock rcu_read_lock rhashtable_bucket irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex pin_fs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 
nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &base->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex (work_completion)(&(&devlink_port->type_warn_dw)->work) irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &devlink_port->type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex net_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &tn->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &x->wait#9 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex gdp_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex gdp_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex gdp_mutex fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex gdp_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex gdp_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex gdp_mutex lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex gdp_mutex kobj_ns_type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex bus_type_sem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex sysfs_symlink_target_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex dpm_list_mtx irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex uevent_sock_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 
nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex subsys mutex#17 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &dir->lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex dev_hotplug_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex dev_base_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex input_pool.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex batched_entropy_u32.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &tbl->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex sysctl_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex failover_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex pcpu_alloc_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex proc_subdir_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex proc_subdir_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &idev->mc_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock 
&dev->mutex &devlink->lock_key rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &idev->mc_lock _xmit_ETHER pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &pnettable->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex smc_ib_devices.mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &vn->sock_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock nsim_bus_dev_list_lock.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#51 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex uevent_sock_mutex &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 
nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &rq->__lock irq_context: softirq &(&nsim_dev->trap_data->trap_report_dw)->timer irq_context: softirq &(&nsim_dev->trap_data->trap_report_dw)->timer rcu_read_lock &pool->lock irq_context: softirq &(&nsim_dev->trap_data->trap_report_dw)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&nsim_dev->trap_data->trap_report_dw)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&nsim_dev->trap_data->trap_report_dw)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&nsim_dev->trap_data->trap_report_dw)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: softirq (&app->join_timer) batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock 
irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex deferred_probe_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex rcu_read_lock &pool->lock/1 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex probe_waitqueue.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock subsys mutex#82 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 
nsim_bus_dev_list_lock.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex devlinks.xa_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 crngs.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 devlinks.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &xa->xa_lock#14 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 pcpu_alloc_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 pcpu_alloc_mutex pcpu_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex batched_entropy_u8.lock crngs.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &base->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 pin_fs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 
nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 batched_entropy_u32.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &base->lock 
&obj_hash[i].lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key &devlink_port->type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&hwstats->traffic_dw)->timer irq_context: softirq &(&hwstats->traffic_dw)->timer rcu_read_lock &pool->lock irq_context: softirq &(&hwstats->traffic_dw)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&hwstats->traffic_dw)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&hwstats->traffic_dw)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&hwstats->traffic_dw)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) &hwstats->hwsdev_list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 rtnl_mutex devnet_rename_sem irq_context: 0 rtnl_mutex devnet_rename_sem (console_sem).lock irq_context: 0 rtnl_mutex devnet_rename_sem console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex devnet_rename_sem console_lock console_srcu console_owner irq_context: 0 rtnl_mutex devnet_rename_sem console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex devnet_rename_sem console_lock console_srcu console_owner console_owner_lock irq_context: 0 rtnl_mutex devnet_rename_sem fs_reclaim irq_context: 0 rtnl_mutex devnet_rename_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex devnet_rename_sem pool_lock#2 irq_context: 0 rtnl_mutex devnet_rename_sem &k->list_lock irq_context: 0 rtnl_mutex devnet_rename_sem &root->kernfs_rwsem irq_context: 0 rtnl_mutex devnet_rename_sem &root->kernfs_rwsem irq_context: 0 rtnl_mutex devnet_rename_sem &root->kernfs_rwsem fs_reclaim irq_context: 0 rtnl_mutex devnet_rename_sem &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex devnet_rename_sem &root->kernfs_rwsem &____s->seqcount irq_context: 0 rtnl_mutex 
devnet_rename_sem &root->kernfs_rwsem pool_lock#2 irq_context: 0 rtnl_mutex devnet_rename_sem &root->kernfs_rwsem rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex devnet_rename_sem &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 rtnl_mutex devnet_rename_sem &root->kernfs_rwsem kernfs_rename_lock irq_context: 0 rtnl_mutex devnet_rename_sem kernfs_rename_lock irq_context: 0 rtnl_mutex devnet_rename_sem &c->lock irq_context: 0 rtnl_mutex devnet_rename_sem uevent_sock_mutex irq_context: 0 rtnl_mutex devnet_rename_sem uevent_sock_mutex fs_reclaim irq_context: 0 rtnl_mutex devnet_rename_sem uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex devnet_rename_sem uevent_sock_mutex pool_lock#2 irq_context: 0 rtnl_mutex devnet_rename_sem uevent_sock_mutex nl_table_lock irq_context: 0 rtnl_mutex devnet_rename_sem uevent_sock_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex devnet_rename_sem uevent_sock_mutex nl_table_wait.lock irq_context: 0 rtnl_mutex devnet_rename_sem &obj_hash[i].lock irq_context: 0 rtnl_mutex (work_completion)(&(&devlink_port->type_warn_dw)->work) irq_context: 0 rtnl_mutex &devlink_port->type_lock irq_context: 0 rtnl_mutex sysctl_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex sysctl_lock pool_lock#2 irq_context: 0 rtnl_mutex sysctl_lock krc.lock irq_context: 0 rtnl_mutex &nft_net->commit_mutex irq_context: 0 rtnl_mutex &ent->pde_unload_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rcu_read_lock &data->fib_event_queue_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rcu_read_lock &tb->tb6_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rcu_read_lock &tb->tb6_lock &data->fib_event_queue_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &(&fn_net->fib_chain)->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 batched_entropy_u8.lock irq_context: 0 
sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 kfence_freelist_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &devlink_port->type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 stack_depot_init_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex bpf_devs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex bpf_devs_lock fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex bpf_devs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex bpf_devs_lock rcu_read_lock rhashtable_bucket irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex pin_fs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 sb_writers#8 
&of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &base->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex (work_completion)(&(&devlink_port->type_warn_dw)->work) irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &devlink_port->type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex net_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &tn->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &x->wait#9 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex gdp_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex gdp_mutex 
&k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex gdp_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex gdp_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex gdp_mutex lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex gdp_mutex kobj_ns_type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex bus_type_sem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex sysfs_symlink_target_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex lock kernfs_idr_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex dpm_list_mtx irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex uevent_sock_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex 
uevent_sock_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex subsys mutex#17 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &dir->lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex dev_hotplug_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex dev_base_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex input_pool.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex batched_entropy_u32.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &tbl->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex sysctl_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex failover_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex pcpu_alloc_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex proc_subdir_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex proc_subdir_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &idev->mc_lock irq_context: 0 
sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &pnettable->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex smc_ib_devices.mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &vn->sock_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock &data->fib_event_queue_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 pool_lock#2 irq_context: 0 rtnl_mutex sysctl_lock &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex sysctl_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex sysctl_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex devnet_rename_sem &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &____s->seqcount irq_context: 0 rtnl_mutex devnet_rename_sem &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &pcp->lock &zone->lock irq_context: 0 
sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex uevent_sock_mutex &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 crngs.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 devlinks.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &xa->xa_lock#14 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 pcpu_alloc_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 quarantine_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 remove_cache_srcu irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 remove_cache_srcu &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock 
&dev->mutex &devlink->lock_key#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &base->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 pin_fs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 batched_entropy_u32.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 
sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#2 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#2 &devlink_port->type_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock crngs.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock &nsim_dev->fa_cookie_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rcu_read_lock &data->fib_event_queue_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rcu_read_lock &tb->tb6_lock 
irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rcu_read_lock &tb->tb6_lock &data->fib_event_queue_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &(&fn_net->fib_chain)->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &devlink_port->type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 stack_depot_init_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex bpf_devs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex bpf_devs_lock fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex bpf_devs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex bpf_devs_lock rcu_read_lock rhashtable_bucket irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex pin_fs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 sb_writers#8 &of->mutex 
kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &base->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex (work_completion)(&(&devlink_port->type_warn_dw)->work) irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &devlink_port->type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex net_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &tn->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &x->wait#9 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex gdp_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex gdp_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex gdp_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex gdp_mutex &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex gdp_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex gdp_mutex lock 
irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex gdp_mutex kobj_ns_type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex bus_type_sem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex sysfs_symlink_target_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex dpm_list_mtx irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex 
&k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex subsys mutex#17 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &dir->lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex dev_hotplug_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex dev_base_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex input_pool.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex batched_entropy_u32.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &tbl->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex sysctl_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex failover_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex pcpu_alloc_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex proc_subdir_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex proc_subdir_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &idev->mc_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex 
&devlink->lock_key#3 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &pnettable->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex smc_ib_devices.mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &vn->sock_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &rq->__lock irq_context: 0 rtnl_mutex devnet_rename_sem &root->kernfs_rwsem &c->lock irq_context: 0 rtnl_mutex devnet_rename_sem uevent_sock_mutex &rq->__lock irq_context: 0 rtnl_mutex devnet_rename_sem uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex devnet_rename_sem uevent_sock_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock kfence_freelist_lock irq_context: 0 rtnl_mutex devnet_rename_sem uevent_sock_mutex &c->lock irq_context: 0 rtnl_mutex devnet_rename_sem uevent_sock_mutex &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &wg->static_identity.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &wg->static_identity.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock fs_reclaim irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock pcpu_alloc_mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &handshake->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex 
rtnl_mutex &wg->device_update_lock tk_core.seq.seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &table->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &peer->endpoint_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex.wait_lock irq_context: 0 cb_lock genl_mutex &p->pi_lock irq_context: 0 cb_lock genl_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock batched_entropy_u32.lock crngs.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &obj_hash[i].lock pool_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &n->list_lock &c->lock irq_context: 0 rtnl_mutex rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock 
fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &data->fib_event_queue_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex rcu_read_lock &data->fib_event_queue_lock irq_context: 0 rtnl_mutex rcu_read_lock &n->list_lock irq_context: 0 rtnl_mutex rcu_read_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock &data->fib_event_queue_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 kn->active#52 &c->lock irq_context: 0 kn->active#52 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 crngs.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 devlinks.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 pool_lock#2 irq_context: 0 sb_writers#8 
&of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &xa->xa_lock#14 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 pcpu_alloc_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 quarantine_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 remove_cache_srcu irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 remove_cache_srcu &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &base->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 pin_fs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 
sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 batched_entropy_u32.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &obj_hash[i].lock pool_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &rq->__lock irq_context: 0 cb_lock genl_mutex genl_mutex.wait_lock irq_context: 0 cb_lock genl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex.wait_lock irq_context: 0 cb_lock &p->pi_lock irq_context: 0 cb_lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#3 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#3 &devlink_port->type_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 
nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 rtnl_mutex _xmit_SIT irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock &c->lock irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock &data->fib_event_queue_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem &br->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex dev_addr_sem &br->lock &br->hash_lock irq_context: 0 rtnl_mutex dev_addr_sem &br->lock &br->hash_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock &tb->tb6_lock irq_context: 0 rtnl_mutex dev_addr_sem &br->lock &br->hash_lock rcu_read_lock rhashtable_bucket irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 rtnl_mutex dev_addr_sem &br->lock &br->hash_lock nl_table_lock irq_context: 
0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock &tb->tb6_lock &data->fib_event_queue_lock irq_context: 0 rtnl_mutex dev_addr_sem &br->lock &br->hash_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_addr_sem &br->lock &br->hash_lock nl_table_wait.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock crngs.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock &nsim_dev->fa_cookie_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key/1 irq_context: 0 rtnl_mutex &br->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &br->lock &base->lock irq_context: 0 rtnl_mutex &br->lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key/1 pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key/1 irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key/1 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &(&fn_net->fib_chain)->lock irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock &c->lock irq_context: softirq (&brmctx->ip6_own_query.timer) irq_context: softirq (&brmctx->ip6_own_query.timer) &br->multicast_lock irq_context: 
softirq (&brmctx->ip4_own_query.timer) irq_context: softirq (&brmctx->ip4_own_query.timer) &br->multicast_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ifa->lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ifa->lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex _xmit_TUNNEL irq_context: 0 rtnl_mutex &ndev->lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &devlink_port->type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 stack_depot_init_mutex irq_context: 0 rtnl_mutex _xmit_IPGRE irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bpf_devs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bpf_devs_lock fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bpf_devs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bpf_devs_lock rcu_read_lock rhashtable_bucket irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bpf_devs_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex pin_fs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 
irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &base->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &base->lock &obj_hash[i].lock 
irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex (work_completion)(&(&devlink_port->type_warn_dw)->work) irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &devlink_port->type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex net_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &tn->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &x->wait#9 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex gdp_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex gdp_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex gdp_mutex lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex gdp_mutex kobj_ns_type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bus_type_sem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex sysfs_symlink_target_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex 
&devlink->lock_key#4 rtnl_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex dpm_list_mtx irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex subsys mutex#17 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &dir->lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex dev_hotplug_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex dev_base_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex input_pool.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex batched_entropy_u32.lock irq_context: 0 sb_writers#8 
&of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &tbl->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex sysctl_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex failover_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex pcpu_alloc_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex proc_subdir_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex proc_subdir_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &idev->mc_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &pnettable->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex smc_ib_devices.mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &vn->sock_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rcu_read_lock &ndev->lock irq_context: softirq (&in_dev->mr_ifc_timer) irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &in_dev->mc_tomb_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock &pcp->lock &zone->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock &pcp->lock &zone->lock &____s->seqcount irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock &____s->seqcount irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock rcu_read_lock pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock rcu_read_lock &dir->lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock rcu_read_lock &ul->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &____s->seqcount#7 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &nf_conntrack_locks[i] 
irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &obj_hash[i].lock pool_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &dir->lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &tbl->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &n->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &____s->seqcount#9 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) batched_entropy_u32.lock irq_context: softirq (&in_dev->mr_ifc_timer) batched_entropy_u32.lock crngs.lock irq_context: softirq (&in_dev->mr_ifc_timer) &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) &base->lock irq_context: softirq (&in_dev->mr_ifc_timer) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock batched_entropy_u32.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock batched_entropy_u32.lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock crngs.lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock &base->lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rt6_exception_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex 
rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &____s->seqcount irq_context: 0 rtnl_mutex _xmit_TUNNEL6 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock &____s->seqcount irq_context: 0 rtnl_mutex dev_addr_sem &br->lock &br->hash_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock crngs.lock irq_context: 0 rtnl_mutex dev_addr_sem &br->lock &br->hash_lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock &nsim_dev->fa_cookie_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key/1 &c->lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key/1 &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 quarantine_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &____s->seqcount#10 irq_context: softirq &(&br->gc_work)->timer irq_context: softirq &(&br->gc_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&br->gc_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) &base->lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 
nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex quarantine_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex remove_cache_srcu irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex batched_entropy_u32.lock crngs.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &idev->mc_lock _xmit_ETHER &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &idev->mc_lock _xmit_ETHER &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 crngs.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 devlinks.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &xa->xa_lock#14 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &n->list_lock &c->lock 
irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 pcpu_alloc_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &base->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 pin_fs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem batched_entropy_u8.lock 
irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem kfence_freelist_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#4 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#4 &devlink_port->type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 batched_entropy_u32.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &ndev->lock &n->list_lock irq_context: 0 rtnl_mutex &ndev->lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex &ndev->lock &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex &ndev->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex &ndev->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rcu_read_lock &data->fib_event_queue_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock 
irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rcu_read_lock &tb->tb6_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rcu_read_lock &tb->tb6_lock &data->fib_event_queue_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &(&fn_net->fib_chain)->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &devlink_port->type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 stack_depot_init_mutex irq_context: 0 rtnl_mutex devnet_rename_sem &n->list_lock irq_context: 0 rtnl_mutex devnet_rename_sem &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex bpf_devs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex bpf_devs_lock fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex bpf_devs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: softirq (&app->join_timer)#2 &app->lock#2 batched_entropy_u32.lock crngs.lock irq_context: softirq &(&br->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&br->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex bpf_devs_lock rcu_read_lock rhashtable_bucket irq_context: softirq &(&br->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex pin_fs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 
&c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &base->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex (work_completion)(&(&devlink_port->type_warn_dw)->work) irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &devlink_port->type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex net_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 
rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &tn->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &x->wait#9 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex gdp_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex gdp_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex gdp_mutex lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex gdp_mutex kobj_ns_type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex bus_type_sem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex sysfs_symlink_target_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex dpm_list_mtx irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex uevent_sock_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex 
kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex subsys mutex#17 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &dir->lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex dev_hotplug_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex dev_base_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex input_pool.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex batched_entropy_u32.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &tbl->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex sysctl_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex failover_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex pcpu_alloc_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex proc_subdir_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex proc_subdir_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &idev->mc_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &pnettable->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex smc_ib_devices.mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &vn->sock_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &xa->xa_lock#14 &c->lock irq_context: 0 rtnl_mutex dev_addr_sem remove_cache_srcu irq_context: 0 rtnl_mutex dev_addr_sem remove_cache_srcu quarantine_lock irq_context: 0 rtnl_mutex dev_addr_sem 
remove_cache_srcu &c->lock irq_context: 0 rtnl_mutex dev_addr_sem remove_cache_srcu &n->list_lock irq_context: 0 rtnl_mutex dev_addr_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem remove_cache_srcu &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &bond->stats_lock/1 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 rcu_read_lock _xmit_ETHER irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex 
&sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock _xmit_ETHER irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock _xmit_ETHER pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock _xmit_ETHER &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pcpu_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock crngs.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &nsim_dev->fa_cookie_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &meta->lock irq_context: 0 
(wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &pool->lock &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &dir->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex pcpu_alloc_mutex irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock nl_table_wait.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex (inet6addr_validator_chain).rwsem irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->ipv6.addrconf_hash_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock 
&pool->lock/1 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex pcpu_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &ifa->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &ndev->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &ndev->lock &ifa->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &ndev->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex uevent_sock_mutex &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem team->team_lock_key irq_context: 0 rtnl_mutex 
&dev_addr_list_lock_key#2/1 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock quarantine_lock irq_context: 0 rtnl_mutex &tb->tb6_lock &n->list_lock irq_context: 0 rtnl_mutex &tb->tb6_lock &n->list_lock &c->lock irq_context: softirq (&app->periodic_timer) irq_context: softirq (&app->periodic_timer) &app->lock#2 irq_context: softirq (&app->periodic_timer) &app->lock#2 &obj_hash[i].lock irq_context: softirq (&app->periodic_timer) &app->lock#2 &base->lock irq_context: softirq (&app->periodic_timer) &app->lock#2 &base->lock &obj_hash[i].lock irq_context: 0 &fsnotify_mark_srcu &rq->__lock irq_context: 0 &fsnotify_mark_srcu &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex dev_addr_sem &n->list_lock irq_context: 0 rtnl_mutex dev_addr_sem &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_node_0 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock batched_entropy_u32.lock crngs.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2/1 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex lweventlist_lock batched_entropy_u8.lock irq_context: 0 rtnl_mutex lweventlist_lock kfence_freelist_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#5 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex &c->lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#5 &devlink_port->type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 crngs.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 devlinks.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &xa->xa_lock#14 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 pcpu_alloc_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 
&base->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 pin_fs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 batched_entropy_u32.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &rq->__lock irq_context: 0 sb_writers#8 &of->mutex 
kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &ndev->lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock &n->list_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &dir->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock deferred_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock (console_sem).lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock console_lock console_srcu console_owner irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events 
(linkwatch_work).work rtnl_mutex &br->lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock nl_table_wait.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &base->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &br->multicast_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock lweventlist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock lweventlist_lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock lweventlist_lock &dir->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work &rq->__lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(work) lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &idev->mc_lock &base->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &idev->mc_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &ndev->lock &ifa->lock batched_entropy_u32.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &ndev->lock &ifa->lock crngs.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &ndev->lock &ifa->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &ndev->lock &ifa->lock &base->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &ndev->lock &ifa->lock &base->lock &obj_hash[i].lock irq_context: softirq (&pmctx->ip6_own_query.timer) irq_context: softirq (&pmctx->ip6_own_query.timer) &br->multicast_lock irq_context: softirq (&pmctx->ip4_own_query.timer) irq_context: softirq (&pmctx->ip4_own_query.timer) &br->multicast_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rcu_read_lock &data->fib_event_queue_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rcu_read_lock rcu_read_lock 
&pool->lock &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rcu_read_lock &tb->tb6_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rcu_read_lock &tb->tb6_lock &data->fib_event_queue_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rcu_read_lock &tb->tb6_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &(&fn_net->fib_chain)->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &dir->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock deferred_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock batched_entropy_u32.lock crngs.lock irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &n->list_lock irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &devlink_port->type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 stack_depot_init_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex bpf_devs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex bpf_devs_lock fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex bpf_devs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex bpf_devs_lock rcu_read_lock rhashtable_bucket irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex pin_fs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex 
kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &base->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex (work_completion)(&(&devlink_port->type_warn_dw)->work) irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &devlink_port->type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock 
&dev->mutex &devlink->lock_key#6 rtnl_mutex net_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &tn->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &x->wait#9 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex gdp_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex gdp_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex gdp_mutex lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex gdp_mutex kobj_ns_type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex bus_type_sem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex sysfs_symlink_target_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock 
&dev->mutex &devlink->lock_key#6 rtnl_mutex dpm_list_mtx irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex uevent_sock_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex subsys mutex#17 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &dir->lock#2 irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex &pool->lock &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex dev_hotplug_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex dev_base_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex input_pool.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex batched_entropy_u32.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &tbl->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex sysctl_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex 
&devlink->lock_key#6 rtnl_mutex failover_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex pcpu_alloc_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex proc_subdir_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex proc_subdir_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &idev->mc_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &pnettable->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex smc_ib_devices.mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &vn->sock_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &handshake->lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (gc_work).work fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (gc_work).work fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events_power_efficient (gc_work).work fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events_power_efficient (gc_work).work fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock &____s->seqcount irq_context: 0 
(wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock crngs.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock &nsim_dev->fa_cookie_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &____s->seqcount irq_context: softirq rcu_read_lock &br->hash_lock irq_context: softirq rcu_read_lock &br->hash_lock pool_lock#2 irq_context: softirq rcu_read_lock &br->hash_lock rcu_read_lock rhashtable_bucket irq_context: softirq rcu_read_lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: softirq rcu_read_lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq rcu_read_lock &br->hash_lock nl_table_lock irq_context: softirq rcu_read_lock &br->hash_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &br->hash_lock nl_table_wait.lock irq_context: softirq rcu_read_lock &br->multicast_lock irq_context: softirq rcu_read_lock &br->multicast_lock pool_lock#2 irq_context: softirq rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: softirq rcu_read_lock &br->multicast_lock &____s->seqcount irq_context: softirq rcu_read_lock &br->multicast_lock rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock &br->multicast_lock &dir->lock#2 irq_context: softirq rcu_read_lock &br->multicast_lock deferred_lock irq_context: softirq rcu_read_lock &br->multicast_lock rcu_read_lock &pool->lock irq_context: softirq rcu_read_lock &br->multicast_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &br->multicast_lock nl_table_lock irq_context: softirq rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: softirq 
rcu_read_lock &br->multicast_lock &base->lock irq_context: softirq rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock batched_entropy_u32.lock crngs.lock irq_context: 0 rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex dev_addr_sem team->team_lock_key#2 irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: softirq rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock &n->list_lock irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key/1 &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key/1 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex uevent_sock_mutex &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &pcp->lock 
&zone->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key lweventlist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key lweventlist_lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key lweventlist_lock &dir->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &meta->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 kfence_freelist_lock irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock &____s->seqcount irq_context: 0 rtnl_mutex &ndev->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex &ndev->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &____s->seqcount irq_context: softirq (&tun->flow_gc_timer) irq_context: softirq (&tun->flow_gc_timer) &tun->lock irq_context: softirq &(&conn->info_timer)->timer irq_context: softirq &(&conn->info_timer)->timer rcu_read_lock &pool->lock irq_context: softirq &(&conn->info_timer)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&conn->info_timer)->work) irq_context: 0 (wq_completion)events (work_completion)(&(&conn->info_timer)->work) &conn->chan_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock 
&net->sctp.addr_wq_lock &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 lweventlist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 lweventlist_lock &dir->lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock quarantine_lock irq_context: 0 rtnl_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex dev_addr_sem team->team_lock_key#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex _xmit_ETHER/1 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) console_owner_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) console_owner_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) console_owner irq_context: 0 wq_pool_attach_mutex wq_pool_attach_mutex.wait_lock irq_context: 0 wq_pool_attach_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 wq_pool_attach_mutex.wait_lock irq_context: 0 &pool->lock wq_mayday_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock pool_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex quarantine_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: 
&qdisc_tx_busylock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 rtnl_mutex dev_addr_sem &pcp->lock &zone->lock irq_context: 0 rtnl_mutex dev_addr_sem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) console_owner irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 rtnl_mutex dev_addr_sem fill_pool_map-wait-type-override &rq->__lock irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock &pcp->lock &zone->lock irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex dev_addr_sem fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex dev_addr_sem fill_pool_map-wait-type-override pool_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#6 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#6 &devlink_port->type_lock irq_context: softirq rcu_read_lock &br->hash_lock &c->lock irq_context: softirq rcu_read_lock &br->multicast_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 
0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock quarantine_lock irq_context: 0 &mm->mmap_lock &rcu_state.expedited_wq irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 &c->lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 &____s->seqcount irq_context: softirq (&hsr->announce_timer) irq_context: softirq (&hsr->announce_timer) rcu_read_lock pool_lock#2 irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock &____s->seqcount irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock pool_lock#2 irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock &obj_hash[i].lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock &hsr->list_lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock &new_node->seq_out_lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock &c->lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock &obj_hash[i].lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock pool_lock#2 irq_context: softirq (&hsr->announce_timer) rcu_read_lock &obj_hash[i].lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &base->lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex lweventlist_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex lweventlist_lock &base->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex lweventlist_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex devnet_rename_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex sysctl_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex sysctl_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock irq_context: 0 
(wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock crngs.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock &nsim_dev->fa_cookie_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock &____s->seqcount irq_context: softirq rcu_read_lock &n->list_lock irq_context: softirq rcu_read_lock &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 lweventlist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 lweventlist_lock &dir->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock _xmit_ETHER &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 fill_pool_map-wait-type-override pool_lock irq_context: 0 &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex pgd_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_state.exp_mutex key irq_context: 0 rtnl_mutex rcu_state.exp_mutex pcpu_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex percpu_counters_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex pool_lock#2 irq_context: 0 rtnl_mutex rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 nl_table_lock nl_table_wait.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &n->list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) &rq->__lock irq_context: 0 rtnl_mutex dev_addr_sem team->team_lock_key#4 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 &n->list_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 &n->list_lock &c->lock irq_context: 0 (wq_completion)events 
(linkwatch_work).work rtnl_mutex team->team_lock_key#3 &rq->__lock irq_context: 0 rtnl_mutex dev_addr_sem team->team_lock_key#5 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#2/1 &c->lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#2/1 &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock &br->multicast_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock quarantine_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock kfence_freelist_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) 
rtnl_mutex &idev->mc_lock _xmit_ETHER/1 &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER/1 &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock quarantine_lock irq_context: softirq rcu_read_lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_read_lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &new_node->seq_out_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 &obj_hash[i].lock pool_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem pgd_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem key irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem pcpu_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem percpu_counters_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &rq->__lock irq_context: 0 rtnl_mutex remove_cache_srcu &pcp->lock &zone->lock 
irq_context: 0 rtnl_mutex remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: softirq &(&bat_priv->orig_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock &rq->__lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock &n->list_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &pcp->lock &zone->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem remove_cache_srcu irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem remove_cache_srcu quarantine_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem remove_cache_srcu &c->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem remove_cache_srcu &n->list_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem remove_cache_srcu &obj_hash[i].lock irq_context: 0 rtnl_mutex &nn->netlink_tap_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem pcpu_alloc_mutex &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 lweventlist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 lweventlist_lock &dir->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex j1939_netdev_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 lweventlist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 lweventlist_lock &dir->lock#2 irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work rcu_node_0 irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work rcu_read_lock &rq->__lock irq_context: softirq rcu_read_lock &br->hash_lock &____s->seqcount irq_context: softirq rcu_read_lock &br->multicast_lock &n->list_lock irq_context: softirq rcu_read_lock &br->multicast_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &bat_priv->tt.changes_list_lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &bat_priv->tt.changes_list_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &bat_priv->tt.changes_list_lock pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock key#16 irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &batadv_netdev_addr_lock_key/1 irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock krc.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock krc.lock &base->lock irq_context: 0 
rtnl_mutex dev_addr_sem &tbl->lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &batadv_netdev_addr_lock_key/1 irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &new_node->seq_out_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock quarantine_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock pool_lock irq_context: softirq rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &tb->tb6_lock batched_entropy_u8.lock irq_context: 0 rtnl_mutex &tb->tb6_lock 
kfence_freelist_lock irq_context: 0 rtnl_mutex &tb->tb6_lock &meta->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &hsr->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &rq->__lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)events deferred_process_work &rq->__lock irq_context: 0 rtnl_mutex pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &____s->seqcount irq_context: 0 pcpu_alloc_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 pcpu_alloc_mutex rcu_read_lock &rq->__lock irq_context: 0 pcpu_alloc_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock quarantine_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &c->lock irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/1 irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/1 _xmit_ETHER irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/1 pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 _xmit_ETHER irq_context: 0 rtnl_mutex dev_addr_sem quarantine_lock irq_context: 0 rtnl_mutex dev_addr_sem _xmit_ETHER irq_context: 0 rtnl_mutex dev_addr_sem _xmit_ETHER pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem _xmit_ETHER (console_sem).lock irq_context: 0 rtnl_mutex dev_addr_sem _xmit_ETHER console_lock console_srcu console_owner_lock 
irq_context: 0 rtnl_mutex dev_addr_sem _xmit_ETHER console_lock console_srcu console_owner irq_context: 0 rtnl_mutex dev_addr_sem _xmit_ETHER console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex dev_addr_sem _xmit_ETHER console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_node_0 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &meta->lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) kfence_freelist_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 _xmit_ETHER pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu &rq->__lock irq_context: 0 rtnl_mutex dev_addr_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/1 irq_context: 0 rtnl_mutex _xmit_ETHER (console_sem).lock irq_context: 0 rtnl_mutex _xmit_ETHER console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex _xmit_ETHER console_lock console_srcu console_owner irq_context: 0 rtnl_mutex _xmit_ETHER console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex _xmit_ETHER console_lock console_srcu console_owner console_owner_lock irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/1 _xmit_ETHER irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock &list->lock#16 irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock/1 irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&port->bc_work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&port->bc_work) &list->lock#16 
irq_context: 0 (wq_completion)events_unbound (work_completion)(&port->bc_work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&port->bc_work) pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem team->team_lock_key#6 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 _xmit_ETHER irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 _xmit_ETHER pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &ipvlan->addrs_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &ipvlan->addrs_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex 
fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &n->list_lock &c->lock irq_context: 0 rtnl_mutex rcu_read_lock &ipvlan->addrs_lock irq_context: 0 rtnl_mutex rcu_read_lock &ipvlan->addrs_lock &c->lock irq_context: 0 rtnl_mutex rcu_read_lock &ipvlan->addrs_lock &____s->seqcount irq_context: 0 rtnl_mutex rcu_read_lock &ipvlan->addrs_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key/1 &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key/1 &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3/1 irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock kfence_freelist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &meta->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 
(wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 _xmit_ETHER irq_context: 0 &p->lock remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &p->lock remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 _xmit_ETHER &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 _xmit_ETHER &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex lweventlist_lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 lweventlist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 lweventlist_lock &dir->lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 rtnl_mutex dev_addr_sem &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh pool_lock#2 irq_context: softirq &(&hwstats->traffic_dw)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: 
&qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock pool_lock#2 irq_context: softirq (&hsr->prune_timer) irq_context: softirq (&hsr->prune_timer) &hsr->list_lock irq_context: softirq (&hsr->prune_timer) &obj_hash[i].lock irq_context: softirq (&hsr->prune_timer) &base->lock irq_context: softirq (&hsr->prune_timer) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macsec_netdev_addr_lock_key/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macsec_netdev_addr_lock_key/1 _xmit_ETHER irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macsec_netdev_addr_lock_key/1 _xmit_ETHER pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex dev_addr_sem &hard_iface->bat_iv.ogm_buff_mutex irq_context: 0 rtnl_mutex key#20 irq_context: softirq &(&forw_packet_aggr->delayed_work)->timer 
rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: softirq &(&bat_priv->tt.work)->timer irq_context: softirq &(&bat_priv->tt.work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&bat_priv->tt.work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&bat_priv->tt.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) key#16 irq_context: 0 rtnl_mutex &bat_priv->tt.commit_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) key#21 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &bat_priv->tt.req_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &bat_priv->tt.roam_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &base->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock &n->list_lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &batadv_netdev_addr_lock_key/1 &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &batadv_netdev_addr_lock_key/1 &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock (console_sem).lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock console_lock console_srcu console_owner irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &n->list_lock &c->lock irq_context: softirq &(&bat_priv->tt.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: softirq &(&bat_priv->tt.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&bat_priv->tt.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &rcu_state.gp_wq irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &sb->s_type->i_lock_key#8 irq_context: 0 rtnl_mutex &dir->lock irq_context: 0 rtnl_mutex k-sk_lock-AF_INET irq_context: 0 rtnl_mutex k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 rtnl_mutex k-sk_lock-AF_INET &table->hash[i].lock irq_context: 0 rtnl_mutex k-sk_lock-AF_INET &table->hash[i].lock k-clock-AF_INET irq_context: 0 rtnl_mutex k-sk_lock-AF_INET &table->hash[i].lock &table->hash2[i].lock irq_context: 0 rtnl_mutex k-sk_lock-AF_INET &rq->__lock irq_context: 0 rtnl_mutex k-slock-AF_INET irq_context: 0 rtnl_mutex rcu_read_lock (console_sem).lock irq_context: 0 rtnl_mutex rcu_read_lock console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex rcu_read_lock console_lock console_srcu console_owner irq_context: 0 rtnl_mutex rcu_read_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex rcu_read_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh key#20 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &entry->crc_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &c->lock irq_context: 0 (wq_completion)mld 
(work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock nl_table_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock rlock-AF_NETLINK irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock nl_table_wait.lock 
irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock lock#8 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock id_table_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &dir->lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock krc.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock krc.lock &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem &meta->lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem kfence_freelist_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &ul->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock crngs.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u32.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 
(wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock quarantine_lock irq_context: 0 rtnl_mutex k-sk_lock-AF_INET6 irq_context: 0 rtnl_mutex k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 rtnl_mutex k-slock-AF_INET6 irq_context: 0 rtnl_mutex k-sk_lock-AF_INET6 &table->hash[i].lock irq_context: 0 rtnl_mutex k-sk_lock-AF_INET6 &table->hash[i].lock k-clock-AF_INET6 irq_context: 0 rtnl_mutex k-sk_lock-AF_INET6 &table->hash[i].lock &table->hash2[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock irq_context: 0 rtnl_mutex &wg->device_update_lock fs_reclaim irq_context: 0 rtnl_mutex &wg->device_update_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &wg->device_update_lock pool_lock#2 irq_context: 0 rtnl_mutex &wg->device_update_lock mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &wg->device_update_lock &sb->s_type->i_lock_key#8 irq_context: 0 rtnl_mutex &wg->device_update_lock &dir->lock irq_context: 0 rtnl_mutex &wg->device_update_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET &table->hash[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET &table->hash[i].lock k-clock-AF_INET irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET &table->hash[i].lock &table->hash2[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock k-slock-AF_INET irq_context: 0 rtnl_mutex &wg->device_update_lock cpu_hotplug_lock irq_context: 0 rtnl_mutex &wg->device_update_lock cpu_hotplug_lock jump_label_mutex irq_context: 0 rtnl_mutex &wg->device_update_lock cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 rtnl_mutex &wg->device_update_lock cpu_hotplug_lock 
jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET6 irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 rtnl_mutex &wg->device_update_lock k-slock-AF_INET6 irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET6 &table->hash[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET6 &table->hash[i].lock k-clock-AF_INET6 irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET6 &table->hash[i].lock &table->hash2[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock &wg->socket_update_lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 rtnl_mutex &wg->device_update_lock &list->lock#17 irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_read_lock_bh rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &wg->device_update_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) 
&wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET batched_entropy_u32.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 rtnl_mutex &wg->device_update_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock key#16 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock 
&bat_priv->softif_vlan_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->softif_vlan_list_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->tt.changes_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->tt.changes_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->tt.changes_list_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->tt.last_changeset_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->tt.last_changeset_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->tvlv.container_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tvlv.container_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tvlv.container_list_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tvlv.container_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock &base->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) 
rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#17 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &new_node->seq_out_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &tbl->lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &tbl->lock batched_entropy_u32.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &tbl->lock batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1 irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg1 
(work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) 
&peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#2 irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); 
(void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) 
+ 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void 
*__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh 
rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#2 irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) 
(__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) 
*)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#17 irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh 
rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1 irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); 
(typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh &r->producer_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0 irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; 
} while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: softirq &keypair->receiving_counter.lock irq_context: softirq &peer->keypairs.keypair_update_lock irq_context: softirq &list->lock#17 irq_context: softirq rcu_read_lock_bh &r->producer_lock#2 irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) 
rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) &ndev->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock &ndev->lock irq_context: softirq (&ndev->rs_timer) pool_lock#2 irq_context: softirq (&ndev->rs_timer) &dir->lock#2 irq_context: softirq (&ndev->rs_timer) &ul->lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#12 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &ndev->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: softirq (&ndev->rs_timer) &ndev->lock batched_entropy_u32.lock irq_context: softirq (&ndev->rs_timer) &ndev->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) &ndev->lock &base->lock irq_context: softirq (&ndev->rs_timer) &ndev->lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &rnp->exp_wq[1] 
irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#3 irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock 
&nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 rtnl_mutex &wg->device_update_lock &____s->seqcount irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)wg-kex-wg2 irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 
(wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg2#2 irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = 
(typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void 
*__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#17 irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + 
(((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-crypt-wg2 irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) 
(__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : 
"0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 
(wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg1 
(work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 misc_mtx rfkill_global_mutex irq_context: 0 misc_mtx rfkill_global_mutex &data->mtx irq_context: 0 misc_mtx rfkill_global_mutex &data->mtx fs_reclaim irq_context: 0 misc_mtx rfkill_global_mutex &data->mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 misc_mtx rfkill_global_mutex &data->mtx pool_lock#2 irq_context: 0 misc_mtx rfkill_global_mutex &data->mtx &rfkill->lock irq_context: 0 cb_lock genl_mutex rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 cb_lock rcu_read_lock pool_lock#2 irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 cb_lock genl_mutex hwsim_radio_lock irq_context: 0 cb_lock genl_mutex &x->wait#9 irq_context: 0 cb_lock genl_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 cb_lock genl_mutex fill_pool_map-wait-type-override pgd_lock irq_context: 0 cb_lock genl_mutex fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex fill_pool_map-wait-type-override key irq_context: 0 cb_lock genl_mutex fill_pool_map-wait-type-override pcpu_lock irq_context: 0 cb_lock genl_mutex fill_pool_map-wait-type-override percpu_counters_lock irq_context: 0 cb_lock genl_mutex fill_pool_map-wait-type-override &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex fill_pool_map-wait-type-override &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex batched_entropy_u32.lock irq_context: 0 cb_lock genl_mutex &obj_hash[i].lock pool_lock irq_context: 0 cb_lock genl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cb_lock genl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 cb_lock genl_mutex &k->list_lock irq_context: 0 cb_lock genl_mutex gdp_mutex irq_context: 0 cb_lock genl_mutex gdp_mutex &k->list_lock irq_context: 0 cb_lock genl_mutex lock irq_context: 0 cb_lock genl_mutex lock kernfs_idr_lock irq_context: 0 cb_lock genl_mutex &root->kernfs_rwsem irq_context: 0 cb_lock genl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cb_lock genl_mutex bus_type_sem irq_context: 0 cb_lock genl_mutex pgd_lock irq_context: 0 cb_lock genl_mutex key irq_context: 0 cb_lock genl_mutex pcpu_lock irq_context: 0 cb_lock genl_mutex percpu_counters_lock irq_context: 0 cb_lock genl_mutex &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex sysfs_symlink_target_lock irq_context: 0 cb_lock genl_mutex &root->kernfs_rwsem irq_context: 0 cb_lock genl_mutex &dev->power.lock irq_context: 0 cb_lock genl_mutex dpm_list_mtx irq_context: 0 cb_lock genl_mutex uevent_sock_mutex irq_context: 0 cb_lock genl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 cb_lock genl_mutex 
uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex subsys mutex#52 irq_context: 0 cb_lock genl_mutex subsys mutex#52 &k->k_lock irq_context: 0 cb_lock genl_mutex device_links_lock irq_context: 0 cb_lock genl_mutex &k->k_lock irq_context: 0 cb_lock genl_mutex deferred_probe_mutex irq_context: 0 cb_lock genl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 cb_lock genl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/1 &c->lock irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/1 &____s->seqcount irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex &____s->seqcount irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex rcu_read_lock pool_lock#2 irq_context: 0 cb_lock 
genl_mutex cpu_hotplug_lock wq_pool_mutex &wq->mutex irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 cb_lock genl_mutex wq_pool_mutex irq_context: 0 cb_lock genl_mutex wq_pool_mutex &wq->mutex irq_context: 0 cb_lock genl_mutex crngs.lock irq_context: 0 cb_lock genl_mutex triggers_list_lock irq_context: 0 cb_lock genl_mutex leds_list_lock irq_context: 0 cb_lock genl_mutex leds_list_lock &led_cdev->trigger_lock irq_context: 0 cb_lock genl_mutex &zone->lock irq_context: 0 cb_lock genl_mutex &zone->lock &____s->seqcount irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex &wg->device_update_lock &n->list_lock irq_context: 0 rtnl_mutex &wg->device_update_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#3 irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#3 
(work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#4 irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); 
(void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 
(wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh 
&table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) 
*)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg1#4 irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) 
(__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); 
(typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#17 irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); 
(typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 rtnl_mutex &bat_priv->softif_vlan_list_lock &c->lock irq_context: 0 rtnl_mutex &bat_priv->softif_vlan_list_lock &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#2 irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + 
(((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &ul->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u32.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh key#20 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &entry->crc_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex param_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex param_lock rate_ctrl_mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex (console_sem).lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fs_reclaim irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &k->list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &k->list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex lock kernfs_idr_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &root->kernfs_rwsem irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex kobj_ns_type_lock irq_context: 0 cb_lock 
genl_mutex rtnl_mutex &rdev->wiphy.mtx lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx lock kernfs_idr_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx bus_type_sem irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx sysfs_symlink_target_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &dev->power.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx dpm_list_mtx irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex nl_table_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex nl_table_wait.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &k->k_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx subsys mutex#53 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx subsys mutex#53 &k->k_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx pin_fs_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex 
&rdev->wiphy.mtx nl_table_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx nl_table_wait.lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) console_owner irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) &n->list_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx reg_requests_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &base->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex irq_context: 0 cb_lock genl_mutex rfkill_global_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex rfkill_global_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rfkill_global_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &k->list_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &c->lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &____s->seqcount irq_context: 0 cb_lock genl_mutex rfkill_global_mutex lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex lock kernfs_idr_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &root->kernfs_rwsem irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cb_lock genl_mutex rfkill_global_mutex bus_type_sem irq_context: 0 cb_lock genl_mutex rfkill_global_mutex sysfs_symlink_target_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &root->kernfs_rwsem irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &dev->power.lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex dpm_list_mtx irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &rfkill->lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex nl_table_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex &obj_hash[i].lock 
irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &k->k_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex subsys mutex#40 irq_context: 0 cb_lock genl_mutex rfkill_global_mutex subsys mutex#40 &k->k_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex triggers_list_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex leds_list_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex leds_list_lock &led_cdev->trigger_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex rcu_read_lock &pool->lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex.wait_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex pin_fs_lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx stack_depot_init_mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx pcpu_alloc_mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx pcpu_alloc_mutex pcpu_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx 
&local->iflist_mtx irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx net_rwsem irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx net_rwsem &list->lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &tn->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &x->wait#9 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx lock kernfs_idr_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx subsys mutex#17 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx subsys mutex#17 &k->k_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &dir->lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx lock kernfs_idr_lock &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx lock kernfs_idr_lock &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx dev_hotplug_mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx dev_hotplug_mutex &dev->power.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx dev_base_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx input_pool.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx batched_entropy_u32.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &tbl->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx sysctl_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx failover_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx proc_subdir_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx proc_inum_ida.xa_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx proc_subdir_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &obj_hash[i].lock pool_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock fs_reclaim irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock _xmit_ETHER irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock _xmit_ETHER pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &pnettable->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx smc_ib_devices.mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &ndev->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex 
&rdev->wiphy.mtx &wdev->mtx irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &fq->lock irq_context: 0 cb_lock genl_mutex (inetaddr_chain).rwsem irq_context: 0 cb_lock genl_mutex inet6addr_chain.lock irq_context: 0 cb_lock genl_mutex hwsim_radio_lock rcu_read_lock rhashtable_bucket irq_context: 0 cb_lock genl_mutex nl_table_lock irq_context: 0 cb_lock genl_mutex nl_table_wait.lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->event_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &rdev->mgmt_registrations_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (work_completion)(&(&sdata->dec_tailroom_needed_wk)->work) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx pin_fs_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &dentry->d_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 pin_fs_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 mount_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 mount_lock mount_lock.seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &fsnotify_mark_srcu irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &xa->xa_lock#7 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &obj_hash[i].lock pool_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &dentry->d_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &fsnotify_mark_srcu irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_lock_key#7 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &s->s_inode_list_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &xa->xa_lock#7 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock mount_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx mount_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx mount_lock mount_lock.seqcount irq_context: 0 cb_lock 
rtnl_mutex &rdev->wiphy.mtx &rdev->wiphy_work_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (&dwork->timer) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &base->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (work_completion)(&(&link->color_collision_detect_work)->work) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->chanctx_mtx irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx fs_reclaim irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &c->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &____s->seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &fq->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx nl_table_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx nl_table_wait.lock irq_context: 0 cb_lock rtnl_mutex.wait_lock irq_context: 0 cb_lock rcu_read_lock &c->lock irq_context: 0 cb_lock rcu_read_lock &n->list_lock irq_context: 0 cb_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 cb_lock rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 cb_lock rtnl_mutex &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy3 irq_context: 0 (wq_completion)phy3 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy3 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &new_node->seq_out_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rcu_read_lock &c->lock irq_context: 0 cb_lock genl_mutex &pcp->lock &zone->lock irq_context: 0 cb_lock genl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cb_lock genl_mutex uevent_sock_mutex &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#3 irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock 
rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 cb_lock genl_mutex uevent_sock_mutex &n->list_lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#4 
(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + 
(((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg2#4 irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 
(wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#4 
(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { 
const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#17 irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 
(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &idev->mc_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &idev->mc_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx rtnl_mutex.wait_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &p->pi_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &p->pi_lock &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->chanctx_mtx irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &rdev->wiphy_work_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 
(work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &rdev->wiphy_work_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &list->lock#18 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &ifibss->incomplete_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx (console_sem).lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx console_lock console_srcu console_owner irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock &meta->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx 
&local->chanctx_mtx fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &data->mutex irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx tk_core.seq.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx hrtimer_bases.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &base->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &wdev->event_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)cfg80211 irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &lock->wait_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &rq->__lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &lock->wait_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &p->pi_lock irq_context: 0 
(wq_completion)events (work_completion)(&rdev->wiphy_work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rq->__lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->event_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx fs_reclaim irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx pool_lock#2 irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &obj_hash[i].lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx nl_table_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx nl_table_wait.lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &c->lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &____s->seqcount irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &list->lock#2 irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &obj_hash[i].lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx pool_lock#2 irq_context: 0 rtnl_mutex net_rwsem pool_lock#2 irq_context: 0 rtnl_mutex net_rwsem &obj_hash[i].lock irq_context: 0 rtnl_mutex net_rwsem nl_table_lock irq_context: 0 rtnl_mutex net_rwsem nl_table_wait.lock irq_context: 0 cb_lock genl_mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem quarantine_lock irq_context: softirq rcu_read_lock hwsim_radio_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx quarantine_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx remove_cache_srcu irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx remove_cache_srcu quarantine_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx remove_cache_srcu &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx remove_cache_srcu &n->list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx remove_cache_srcu &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock 
&pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 cb_lock genl_mutex hwsim_radio_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 cb_lock genl_mutex hwsim_radio_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex hwsim_radio_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock genl_mutex hwsim_radio_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex hwsim_radio_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex hwsim_radio_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock rcu_read_lock rcu_node_0 irq_context: 0 cb_lock rcu_read_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events wireless_nlevent_work irq_context: 0 (wq_completion)events wireless_nlevent_work net_rwsem irq_context: 0 (wq_completion)events wireless_nlevent_work net_rwsem &list->lock#2 irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock kfence_freelist_lock irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/1 _xmit_ETHER pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex dev_addr_sem _xmit_ETHER &c->lock irq_context: 0 rtnl_mutex dev_addr_sem _xmit_ETHER &____s->seqcount irq_context: 0 (wq_completion)phy4 irq_context: 0 (wq_completion)phy4 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy4 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx 
fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override pool_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &pcp->lock &zone->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &n->list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &n->list_lock &c->lock irq_context: 0 tomoyo_ss &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 &c->lock irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 &____s->seqcount irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex console_owner_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex console_owner irq_context: 0 cb_lock genl_mutex rtnl_mutex console_lock console_srcu console_owner_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex console_lock console_srcu console_owner irq_context: 0 cb_lock genl_mutex rtnl_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override pool_lock irq_context: 0 kn->active#5 rcu_read_lock pool_lock#2 irq_context: 0 kn->active#5 &obj_hash[i].lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override &c->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 tomoyo_ss mount_lock irq_context: 0 tomoyo_ss mount_lock rcu_read_lock rename_lock.seqcount irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &pcp->lock &zone->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &lock->wait_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock_bh quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &lock->wait_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &p->pi_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &pcp->lock &zone->lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) 
&rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events wireless_nlevent_work net_rwsem &c->lock irq_context: 0 (wq_completion)events wireless_nlevent_work net_rwsem pool_lock#2 irq_context: 0 (wq_completion)events wireless_nlevent_work net_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)events wireless_nlevent_work net_rwsem nl_table_lock irq_context: 0 (wq_completion)events wireless_nlevent_work net_rwsem nl_table_wait.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh quarantine_lock irq_context: 0 (wq_completion)phy5 irq_context: 0 (wq_completion)phy5 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy5 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)phy6 irq_context: 0 (wq_completion)phy6 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy6 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx console_owner_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx console_owner irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &c->lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &n->list_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &n->list_lock &c->lock irq_context: 0 (wq_completion)events wireless_nlevent_work net_rwsem &n->list_lock irq_context: 0 (wq_completion)events wireless_nlevent_work net_rwsem &n->list_lock &c->lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock hwsim_radio_lock pool_lock#2 irq_context: 
softirq rcu_read_lock hwsim_radio_lock &list->lock#19 irq_context: softirq &list->lock#19 irq_context: softirq rcu_read_lock lock#6 irq_context: softirq rcu_read_lock lock#6 kcov_remote_lock irq_context: softirq rcu_read_lock &local->rx_path_lock irq_context: softirq rcu_read_lock &local->rx_path_lock &list->lock#18 irq_context: softirq rcu_read_lock &local->rx_path_lock &rdev->wiphy_work_lock irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx lock#6 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx lock#6 kcov_remote_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->chanctx_mtx irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &sta->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &sta->lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &sta->lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &sta->lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->rate_ctrl_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->rate_ctrl_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->chanctx_mtx irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx pin_fs_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 (wq_completion)events 
(work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx nl_table_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx nl_table_wait.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->lock krc.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx 
&obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx remove_cache_srcu irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx remove_cache_srcu &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx remove_cache_srcu &obj_hash[i].lock irq_context: 0 &type->s_umount_key#46/1 irq_context: 0 &type->s_umount_key#46/1 fs_reclaim irq_context: 0 &type->s_umount_key#46/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#46/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#46/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#46/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#46/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#46/1 sb_lock irq_context: 0 &type->s_umount_key#46/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#46/1 pool_lock#2 irq_context: 0 &type->s_umount_key#46/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_lock_key#32 irq_context: 0 &type->s_umount_key#46/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#46/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_lock_key#32 &dentry->d_lock irq_context: 0 &type->s_umount_key#46/1 binderfs_minors_mutex irq_context: 0 &type->s_umount_key#46/1 binderfs_minors_mutex binderfs_minors.xa_lock irq_context: 0 &type->s_umount_key#46/1 &dentry->d_lock irq_context: 0 &type->s_umount_key#46/1 &c->lock irq_context: 0 &type->s_umount_key#46/1 &____s->seqcount irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 &sb->s_type->i_lock_key#32 irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 rename_lock.seqcount irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 fs_reclaim irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 pool_lock#2 irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 &dentry->d_lock irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 rcu_read_lock rename_lock.seqcount irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 &dentry->d_lock &wq irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 &sb->s_type->i_lock_key#32 &dentry->d_lock irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 &c->lock irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 rcu_read_lock iunique_lock irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 tk_core.seq.seqcount irq_context: 0 
sb_writers#10 &type->i_mutex_dir_key#6/1 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 rename_lock.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 fs_reclaim irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 &dentry->d_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 &root->kernfs_rwsem irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss &c->lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex fs_reclaim irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex pool_lock#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex pcpu_alloc_mutex irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem tk_core.seq.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &obj_hash[i].lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex css_set_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex cgroup_file_kn_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &c->lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &____s->seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex batched_entropy_u32.lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex lock cgroup_idr_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex cgroup_idr_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex task_group_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &rq->__lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 &dentry->d_lock &lru->node[i].lock irq_context: 0 &type->i_mutex_dir_key#6 irq_context: 0 &type->i_mutex_dir_key#6 fs_reclaim irq_context: 0 &type->i_mutex_dir_key#6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#6 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#6 rcu_read_lock rename_lock.seqcount 
irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem inode_hash_lock irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem fs_reclaim irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#30 irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &sb->s_type->i_lock_key#30 irq_context: 0 &type->i_mutex_dir_key#6 &sb->s_type->i_lock_key#30 irq_context: 0 &type->i_mutex_dir_key#6 &sb->s_type->i_lock_key#30 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#6 &sb->s_type->i_lock_key#30 &dentry->d_lock &wq irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &c->lock irq_context: 0 kn->active#53 fs_reclaim irq_context: 0 kn->active#53 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#53 &c->lock irq_context: 0 kn->active#53 &____s->seqcount irq_context: 0 kn->active#53 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#53 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#53 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#54 fs_reclaim irq_context: 0 kn->active#54 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#54 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#54 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#54 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cgroup_threadgroup_rwsem.rss.gp_wait.lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_node_0 irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rq->__lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 lock pidmap_lock 
&____s->seqcount irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &obj_hash[i].lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem inode_hash_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem fs_reclaim irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem pool_lock#2 irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem inode_hash_lock &sb->s_type->i_lock_key#30 irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem tk_core.seq.seqcount irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &sb->s_type->i_lock_key#30 irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &s->s_inode_list_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &xa->xa_lock#7 irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &fsnotify_mark_srcu irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock kernfs_notify_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock kernfs_notify_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock kernfs_notify_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock kernfs_notify_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem 
css_set_lock &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock &base->lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &p->pi_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &p->pi_lock &rq->__lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &p->pi_lock &rq->__lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &rq->__lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)events kernfs_notify_work &root->kernfs_supers_rwsem inode_hash_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.waiters.lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.rss.gp_wait.lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.rss.gp_wait.lock &obj_hash[i].lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex (wq_completion)cpuset_migrate_mm irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex &wq->mutex irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex &wq->mutex &pool->lock/1 irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex &wq->mutex &x->wait#10 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 rename_lock.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 fs_reclaim irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &dentry->d_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex fs_reclaim irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex fs_reclaim mmu_notifier_invalidate_range_start 
irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex pcpu_alloc_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &____s->seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem tk_core.seq.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &obj_hash[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex css_set_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex lock cgroup_idr_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cgroup_idr_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem cpuset_rwsem.rss.gp_wait.lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex rcu_node_0 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem &obj_hash[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem jump_label_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem jump_label_mutex text_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 
cgroup_mutex cpu_hotplug_lock cpuset_rwsem jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem callback_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem.waiters.lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem.rss.gp_wait.lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem.rss.gp_wait.lock &obj_hash[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &____s->seqcount#2 irq_context: 0 &____s->seqcount#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &n->list_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &n->list_lock &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex percpu_counters_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex shrinker_rwsem irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex shrinker_rwsem fs_reclaim irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex shrinker_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex shrinker_rwsem &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex shrinker_rwsem &____s->seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex shrinker_rwsem pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex shrinker_rwsem &n->list_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex shrinker_rwsem &n->list_lock &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex batched_entropy_u8.lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &pgdat->memcg_lru.lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &dentry->d_lock &lru->node[i].lock irq_context: 0 &type->i_mutex_dir_key#7 irq_context: 0 &type->i_mutex_dir_key#7 fs_reclaim irq_context: 0 &type->i_mutex_dir_key#7 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#7 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#7 rcu_read_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem inode_hash_lock irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem fs_reclaim irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#31 irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem 
tk_core.seq.seqcount irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &sb->s_type->i_lock_key#31 irq_context: 0 &type->i_mutex_dir_key#7 &sb->s_type->i_lock_key#31 irq_context: 0 &type->i_mutex_dir_key#7 &sb->s_type->i_lock_key#31 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#7 &sb->s_type->i_lock_key#31 &dentry->d_lock &wq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &____s->seqcount#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &____s->seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 pool_lock#2 irq_context: 0 kn->active#55 fs_reclaim irq_context: 0 kn->active#55 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#55 &c->lock irq_context: 0 kn->active#55 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#55 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#55 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cgroup_threadgroup_rwsem.rss.gp_wait.lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem fs_reclaim irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem pool_lock#2 irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &c->lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_rwsem irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_rwsem cpuset_rwsem.rss.gp_wait.lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_rwsem.waiters.lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_rwsem.rss.gp_wait.lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_rwsem &p->pi_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_rwsem &p->pi_lock &rq->__lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_rwsem &p->alloc_lock irq_context: 0 
sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_rwsem &p->alloc_lock &____s->seqcount#2 irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_rwsem cpuset_attach_wq.lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &p->alloc_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &p->alloc_lock &memcg->mm_list.lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &obj_hash[i].lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock pool_lock#2 irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock krc.lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.waiters.lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.rss.gp_wait.lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex (wq_completion)cpuset_migrate_mm irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex &wq->mutex irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex &wq->mutex &pool->lock/1 irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex &wq->mutex &x->wait#10 irq_context: 0 stock_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &xa->xa_lock#3 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &xa->xa_lock#3 pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &obj_hash[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 stock_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &xa->xa_lock#3 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &xa->xa_lock#3 pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem stock_lock irq_context: 0 kn->active#56 fs_reclaim irq_context: 0 kn->active#56 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#56 stock_lock irq_context: 0 kn->active#56 pool_lock#2 irq_context: 0 kn->active#56 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#56 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#56 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#56 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#56 &kernfs_locks->open_file_mutex[count] &____s->seqcount#2 irq_context: 0 kn->active#56 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 sb_writers#11 &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &____s->seqcount#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &____s->seqcount irq_context: 0 kn->active#57 fs_reclaim irq_context: 0 kn->active#57 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#57 stock_lock irq_context: 0 kn->active#57 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#57 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#57 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &of->mutex kn->active#57 
memcg_max_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &xa->xa_lock#3 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &xa->xa_lock#3 pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &obj_hash[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 stock_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex blkcg_pol_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex blkcg_pol_mutex fs_reclaim irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex blkcg_pol_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex blkcg_pol_mutex pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex blkcg_pol_mutex pcpu_alloc_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex blkcg_pol_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex devcgroup_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock freezer_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rtnl_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rtnl_mutex &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock &____s->seqcount#2 irq_context: 0 rtnl_mutex dev_addr_sem &____s->seqcount#2 irq_context: 0 rtnl_mutex &____s->seqcount#2 irq_context: softirq rcu_read_lock hwsim_radio_lock &c->lock irq_context: 0 rtnl_mutex _xmit_ETHER &____s->seqcount#2 irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock &____s->seqcount#2 irq_context: 0 tomoyo_ss &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 
&nsim_trap_data->trap_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &____s->seqcount#2 irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock krc.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex net_rwsem pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex net_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex net_rwsem nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex net_rwsem nl_table_wait.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rtnl_mutex &rq->__lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock &n->list_lock &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rtnl_mutex.wait_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &p->pi_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 
sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &p->pi_lock &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#7 stock_lock irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &xa->xa_lock#3 irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &xa->xa_lock#3 pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem stock_lock irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &c->lock irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &____s->seqcount#2 irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &____s->seqcount irq_context: 0 kn->active#55 stock_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem freezer_mutex irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem freezer_mutex freezer_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem freezer_mutex freezer_lock rcu_read_lock &sighand->siglock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem freezer_mutex freezer_lock &sighand->siglock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem freezer_mutex freezer_lock &sighand->siglock &p->pi_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem freezer_mutex freezer_lock &sighand->siglock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &p->alloc_lock &newf->file_lock irq_context: 0 &xa->xa_lock#3 pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &____s->seqcount#2 irq_context: 0 nf_hook_mutex irq_context: 0 nf_hook_mutex fs_reclaim irq_context: 0 nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 nf_hook_mutex stock_lock irq_context: 0 nf_hook_mutex pool_lock#2 irq_context: 0 ebt_mutex &mm->mmap_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET elock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 stock_lock irq_context: 0 &mm->mmap_lock stock_lock irq_context: softirq rcu_callback cgroup_threadgroup_rwsem.rss.gp_wait.lock irq_context: softirq rcu_callback cgroup_threadgroup_rwsem.rss.gp_wait.lock &obj_hash[i].lock irq_context: softirq rcu_callback stock_lock irq_context: 0 &xt[i].mutex &c->lock irq_context: 0 &xt[i].mutex &n->list_lock irq_context: 0 &xt[i].mutex &____s->seqcount irq_context: 0 &xt[i].mutex &mm->mmap_lock irq_context: 0 &xt[i].mutex free_vmap_area_lock irq_context: 0 &xt[i].mutex free_vmap_area_lock &obj_hash[i].lock irq_context: 0 &xt[i].mutex free_vmap_area_lock pool_lock#2 irq_context: 0 &xt[i].mutex vmap_area_lock irq_context: 0 &xt[i].mutex &per_cpu(xt_recseq, i) irq_context: 0 &xt[i].mutex &obj_hash[i].lock irq_context: 0 &xt[i].mutex purge_vmap_area_lock irq_context: 0 &xt[i].mutex rcu_read_lock pool_lock#2 irq_context: 0 nf_nat_proto_mutex irq_context: 0 nf_nat_proto_mutex fs_reclaim irq_context: 0 nf_nat_proto_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 nf_nat_proto_mutex pool_lock#2 irq_context: 0 
nf_nat_proto_mutex nf_hook_mutex irq_context: 0 nf_nat_proto_mutex nf_hook_mutex fs_reclaim irq_context: 0 nf_nat_proto_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 nf_nat_proto_mutex nf_hook_mutex stock_lock irq_context: 0 nf_nat_proto_mutex nf_hook_mutex pool_lock#2 irq_context: 0 nf_nat_proto_mutex cpu_hotplug_lock irq_context: 0 nf_nat_proto_mutex &obj_hash[i].lock irq_context: 0 nf_nat_proto_mutex stock_lock irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &____s->seqcount#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &____s->seqcount#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &c->lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &____s->seqcount#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &____s->seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &pcp->lock &zone->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 kn->active#55 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#55 &kernfs_locks->open_file_mutex[count] &____s->seqcount#2 irq_context: 0 kn->active#55 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 kn->active#57 &c->lock irq_context: 0 kn->active#57 &____s->seqcount#2 irq_context: 0 kn->active#57 &____s->seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss &____s->seqcount#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 nf_hook_mutex &c->lock irq_context: 0 nf_hook_mutex &____s->seqcount#2 irq_context: 0 nf_hook_mutex &____s->seqcount irq_context: 0 nf_nat_proto_mutex &c->lock irq_context: 0 nf_nat_proto_mutex &____s->seqcount#2 irq_context: 0 nf_nat_proto_mutex &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 clock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 elock-AF_INET6 irq_context: 0 &pipe->mutex/1 stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &xa->xa_lock#3 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &xa->xa_lock#3 pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &n->list_lock irq_context: 0 loop_validate_mutex irq_context: 0 loop_validate_mutex &lo->lo_mutex irq_context: 0 &fsnotify_mark_srcu fs_reclaim irq_context: 0 &fsnotify_mark_srcu fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &fsnotify_mark_srcu pool_lock#2 irq_context: 0 
&fsnotify_mark_srcu &group->notification_lock irq_context: 0 &fsnotify_mark_srcu &group->notification_waitq irq_context: 0 &fsnotify_mark_srcu &group->notification_waitq &ep->lock irq_context: 0 &fsnotify_mark_srcu &group->notification_waitq &ep->lock &ep->wq irq_context: 0 &fsnotify_mark_srcu &group->notification_waitq &ep->lock &ep->wq &p->pi_lock irq_context: 0 &fsnotify_mark_srcu &group->notification_waitq &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &fsnotify_mark_srcu &group->notification_waitq &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &fsnotify_mark_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#4 &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 &xt[i].mutex purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 &xt[i].mutex purge_vmap_area_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#4 &____s->seqcount#2 irq_context: 0 &xt[i].mutex &____s->seqcount#2 irq_context: 0 &pipe->mutex/1 &mm->mmap_lock stock_lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &____s->seqcount#2 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 stock_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem stock_lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex irq_context: 0 &vma->vm_lock->lock stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#9 &xa->xa_lock#3 irq_context: 0 &sb->s_type->i_mutex_key#9 &xa->xa_lock#3 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#9 stock_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 stock_lock irq_context: 0 &r->consumer_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &mm->mmap_lock stock_lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock console_owner_lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock console_owner irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fill_pool_map-wait-type-override pool_lock irq_context: 0 &mm->mmap_lock &xa->xa_lock#7 stock_lock irq_context: 0 rds_sock_lock 
irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock &____s->seqcount#2 irq_context: 0 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_RDS irq_context: 0 &sb->s_type->i_mutex_key#10 &rs->rs_recv_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rds_cong_monitor_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rds_cong_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rs->rs_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rs->rs_rdma_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &q->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rds_sock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->ipv4.ra_mutex irq_context: 0 &sb->s_type->i_mutex_key#10 &hashinfo->lock irq_context: 0 &sighand->siglock &p->pi_lock irq_context: 0 tasklist_lock &p->alloc_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem stock_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock &meta->lock irq_context: 0 pcpu_lock stock_lock irq_context: 0 remove_cache_srcu pool_lock#2 irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &____s->seqcount#2 irq_context: 0 &p->lock &____s->seqcount#2 irq_context: 0 sb_writers#8 kn->active#5 &c->lock irq_context: 0 kn->active#5 &____s->seqcount#2 irq_context: 0 tasklist_lock stock_lock irq_context: 0 &group->notification_waitq &ep->lock irq_context: 0 &group->notification_waitq &ep->lock &ep->wq irq_context: 0 &group->notification_waitq &ep->lock &ep->wq &p->pi_lock irq_context: 0 &group->notification_waitq &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &ei->i_es_lock irq_context: 0 &group->notification_waitq &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &fsnotify_mark_srcu irq_context: 0 sb_writers#4 &s->s_inode_list_lock irq_context: 0 sb_writers#4 sb_internal irq_context: 0 sb_writers#4 sb_internal mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 sb_internal pool_lock#2 irq_context: 0 sb_writers#4 sb_internal &journal->j_state_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &xa->xa_lock#7 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_es_lock irq_context: 0 sb_writers#4 sb_internal 
jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 sb_internal &obj_hash[i].lock irq_context: 0 sb_writers#4 inode_hash_lock irq_context: 0 sb_writers#4 inode_hash_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &fsnotify_mark_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &sbi->s_orphan_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rename_lock.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &dentry->d_lock &dentry->d_lock/1 &lru->node[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &dentry->d_lock &dentry->d_lock &lru->node[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &wb->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal 
jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &journal->j_revoke_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &sb->s_type->i_lock_key#5 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &xa->xa_lock#7 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &____s->seqcount#2 irq_context: 0 &p->lock &of->mutex kn->active#5 &____s->seqcount#2 irq_context: softirq fs/notify/mark.c:89 rcu_read_lock &pool->lock/1 irq_context: softirq fs/notify/mark.c:89 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq fs/notify/mark.c:89 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq fs/notify/mark.c:89 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq fs/notify/mark.c:89 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex &rq->__lock irq_context: 0 &xt[i].mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)events_unbound (reaper_work).work &ACCESS_PRIVATE(ssp->srcu_sup, lock) &ACCESS_PRIVATE(sdp, lock) irq_context: 0 (wq_completion)events_unbound (reaper_work).work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (reaper_work).work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex init_mm.page_table_lock irq_context: 0 &xt[i].mutex &pcp->lock &zone->lock irq_context: 0 &xt[i].mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#8 tomoyo_ss &____s->seqcount#2 irq_context: 0 sb_writers#8 &xattrs->lock irq_context: 0 purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 purge_vmap_area_lock pool_lock#2 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &____s->seqcount#2 irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 cgroup_threadgroup_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sighand->siglock stock_lock irq_context: 0 &sighand->siglock &p->pi_lock &rq->__lock irq_context: 0 &sighand->siglock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &n->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 pool_lock#2 irq_context: 0 (wq_completion)rcu_gp 
(work_completion)(&sdp->work) &x->wait#3 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&port->bc_work) quarantine_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &c->lock irq_context: 0 sb_writers#4 sb_internal &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &obj_hash[i].lock pool_lock irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 _xmit_ETHER &____s->seqcount#2 irq_context: softirq rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events_unbound connector_reaper_work &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &____s->seqcount#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu &rq->__lock irq_context: 0 rtnl_mutex dev_addr_sem 
fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex dev_addr_sem fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 rtnl_mutex dev_addr_sem fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &fq->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->active_txq_lock[i] irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->active_txq_lock[i] irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->queue_stop_reason_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &list->lock#19 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &list->lock#19 irq_context: softirq (&peer->timer_persistent_keepalive) irq_context: softirq (&peer->timer_persistent_keepalive) pool_lock#2 irq_context: softirq (&peer->timer_persistent_keepalive) &c->lock irq_context: softirq 
(&peer->timer_persistent_keepalive) &n->list_lock irq_context: softirq (&peer->timer_persistent_keepalive) &n->list_lock &c->lock irq_context: softirq (&peer->timer_persistent_keepalive) &list->lock#17 irq_context: softirq (&peer->timer_persistent_keepalive) tk_core.seq.seqcount irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh &r->producer_lock#2 irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/1 &c->lock irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/1 &____s->seqcount#2 irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/1 &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_node_0 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 &mm->mmap_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &____s->seqcount irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_node_0 irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex 
rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rq->__lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rq->__lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.rss.gp_wait.lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 nf_nat_proto_mutex nf_hook_mutex &c->lock irq_context: 0 nf_nat_proto_mutex nf_hook_mutex &____s->seqcount#2 irq_context: 0 nf_nat_proto_mutex nf_hook_mutex &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 &xt[i].mutex remove_cache_srcu irq_context: 0 &xt[i].mutex remove_cache_srcu quarantine_lock irq_context: 0 &type->i_mutex_dir_key#4 batched_entropy_u8.lock irq_context: 0 &type->i_mutex_dir_key#4 kfence_freelist_lock irq_context: 0 sb_writers#4 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock pool_lock#2 irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 &xt[i].mutex &mm->mmap_lock &rq->__lock irq_context: 0 &xt[i].mutex &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 
rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock rlock-AF_PACKET irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock rcu_read_lock &ei->socket.wq.wait irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : 
"=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) pool_lock#2 irq_context: 0 rtnl_mutex lweventlist_lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)events_unbound (work_completion)(&port->bc_work) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&port->bc_work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events_unbound (work_completion)(&port->bc_work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&port->bc_work) rcu_read_lock &list->lock#5 irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock &____s->seqcount#2 irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock pool_lock#2 
irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &____s->seqcount#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN slock-AF_CAN irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN clock-AF_CAN irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_CAN irq_context: 0 &sighand->siglock &p->pi_lock &cfs_rq->removed.lock irq_context: softirq &(&ifa->dad_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &ndev->lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER &local->filter_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER &local->filter_lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER &local->filter_lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 
&type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 rcu_read_lock &ndev->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 rcu_read_lock pool_lock#2 irq_context: softirq rcu_callback rlock-AF_CAN irq_context: softirq rcu_callback elock-AF_CAN irq_context: 0 remove_cache_srcu rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] &____s->seqcount#2 irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 (wq_completion)rcu_gp &rq->__lock irq_context: 0 (wq_completion)rcu_gp &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#9 fs_reclaim &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#9 batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#9 kfence_freelist_lock irq_context: 0 &type->i_mutex_dir_key#5 &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#3 &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &ei->i_es_lock key#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle 
&ei->i_data_sem &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#7 &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh key#20 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &entry->crc_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) &obj_hash[i].lock pool_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &____s->seqcount#2 irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 lock#4 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 lock#4 &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#5 irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#5 
(work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 
&rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_es_lock key#6 irq_context: 0 &xt[i].mutex rcu_read_lock rcu_node_0 irq_context: 0 &xt[i].mutex rcu_read_lock &rq->__lock irq_context: 0 &xt[i].mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &ei->socket.wq.wait irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 
(wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) batched_entropy_u8.lock irq_context: softirq (&ndev->rs_timer) kfence_freelist_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &meta->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock kfence_freelist_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &ul->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u32.lock irq_context: 0 
(wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock once_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock once_lock crngs.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock once_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &ei->i_es_lock key#2 irq_context: softirq (&ndev->rs_timer) &n->list_lock irq_context: softirq (&ndev->rs_timer) &n->list_lock &c->lock irq_context: 0 rtnl_mutex rcu_read_lock pgd_lock irq_context: 0 
rtnl_mutex rcu_read_lock stock_lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock key irq_context: 0 rtnl_mutex rcu_read_lock pcpu_lock irq_context: 0 rtnl_mutex rcu_read_lock percpu_counters_lock irq_context: 0 rtnl_mutex rcu_read_lock pcpu_lock stock_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 &____s->seqcount irq_context: softirq &(&hwstats->traffic_dw)->timer rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-kex-wg0#6 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock 
rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 rtnl_mutex &wg->device_update_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1#5 irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 
(wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg0#7 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const 
void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock 
irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) 
rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg1#6 irq_context: 0 (wq_completion)wg-kex-wg1#6 &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned 
long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ 
unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#17 irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) 
*)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); 
(void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_callback stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg0#3 irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = 
(typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg0#3 
(work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &ul->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u32.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg2#5 irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock 
&handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + 
(((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 cb_lock genl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); 
(void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#6 irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) 
(__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) 
*)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#17 irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); 
}); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#3 irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 
(wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#7 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] 
&nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg2#7 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) 
*)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : 
"0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#8 irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#8 
(work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg2#8 
(work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg1#8 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); 
({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void 
*)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 
0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#17 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock 
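Note on the recurring "(work_completion)(&({ do { ... } while (0); ... })->work)" entries in the chains above and below: that expression is simply the preprocessor expansion of per_cpu_ptr(worker, cpu)->work, i.e. the per-CPU work item that the wg-kex-<dev> and wg-crypt-<dev> workqueues execute; the locks listed after it (&handshake->lock, &peer->endpoint_lock, the ptr_ring &r->consumer_lock/&r->producer_lock, timer &base->lock, etc.) are what lockdep saw acquired from inside that work function. The following is a minimal sketch of the unexpanded form, assuming the upstream WireGuard queueing layout in drivers/net/wireguard/queueing.h; the struct and function names here are illustrative, not copied verbatim from the kernel tree.

/*
 * Sketch only: shows the per-CPU worker shape whose expanded form appears
 * in the lockdep chains.  Assumes <linux/percpu.h> and <linux/workqueue.h>
 * semantics; enqueue_on_cpu() is a hypothetical helper, not a kernel API.
 */
#include <linux/percpu.h>
#include <linux/workqueue.h>

struct multicore_worker {
	void *ptr;                 /* queue this worker drains */
	struct work_struct work;   /* item run on the wg-kex-<dev> / wg-crypt-<dev> workqueues */
};

static inline void enqueue_on_cpu(struct workqueue_struct *wq,
				  struct multicore_worker __percpu *worker,
				  int cpu)
{
	/*
	 * lockdep reports this work item as
	 * (work_completion)(&({ ...per_cpu_ptr expansion... })->work)
	 * because per_cpu_ptr() is a statement-expression macro.
	 */
	queue_work_on(cpu, wq, &per_cpu_ptr(worker, cpu)->work);
}

Per-CPU workers like this are presumably what lets handshake and packet-encryption work fan out across CPUs, which is why the same chain prefixes repeat for each wg-kex-*/wg-crypt-* instance in this dump.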
irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg0#8 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + 
(((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) 
*)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) 
*)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg1#4 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { 
const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 
(work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#17 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#4 irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : 
"0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while 
(0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#4 irq_context: 0 (wq_completion)wg-crypt-wg0#4 &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 
(work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 cb_lock genl_mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx lock kernfs_idr_lock rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx lock kernfs_idr_lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex &c->lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tvlv.container_list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tvlv.container_list_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tvlv.container_list_lock &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tvlv.container_list_lock &n->list_lock &c->lock irq_context: softirq rcu_read_lock hwsim_radio_lock &n->list_lock irq_context: softirq rcu_read_lock hwsim_radio_lock &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &rq->__lock irq_context: 0 cb_lock genl_mutex quarantine_lock irq_context: 0 cb_lock remove_cache_srcu &rq->__lock irq_context: 0 cb_lock remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 cb_lock remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 cb_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &tb->tb6_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg0#9 irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 
(wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u32.lock crngs.lock irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &____s->seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &obj_hash[i].lock pool_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override &c->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override pool_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &____s->seqcount#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)phy7 irq_context: 0 (wq_completion)phy7 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy7 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 (wq_completion)wg-kex-wg1#9 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock 
tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 
(wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &lock->wait_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 cb_lock &lock->wait_lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &fq->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 
rcu_read_lock &local->active_txq_lock[i] irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->active_txq_lock[i] irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->queue_stop_reason_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &list->lock#19 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &list->lock#19 irq_context: 0 (wq_completion)wg-kex-wg0#10 irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void 
*__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock 
&table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + 
(((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); 
(typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#10 irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); 
(typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : 
"0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#17 irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) 
*)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock 
&____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 remove_cache_srcu irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 remove_cache_srcu quarantine_lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 remove_cache_srcu &c->lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 remove_cache_srcu &n->list_lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); 
(typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh 
&obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) console_owner_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) console_owner irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) &rq->__lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock quarantine_lock irq_context: 0 (wq_completion)wg-kex-wg2#9 irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#9 
(work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) 
*)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } 
while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg2#10 irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock 
&handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); 
})->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#17 irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock 
irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#5 
(work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) 
*)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &obj_hash[i].lock pool_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex rfkill_global_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fs_reclaim &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex 
&n->list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 cb_lock &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) console_owner_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) console_owner irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu quarantine_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu &c->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu &n->list_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)phy9 irq_context: 0 (wq_completion)phy9 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy9 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0#11 irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)phy8 irq_context: 0 (wq_completion)phy8 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy8 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#11 irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#11 
(work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0#12 irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); 
})->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + 
(((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) 
*)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); 
(void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#12 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 
(wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh 
&base->lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#17 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#6 irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 
(wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#11 irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg2#11 
(work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void 
*__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned 
long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg2#12 irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock 
irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) 
rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#17 irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg2#12 
(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#6 irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) 
(__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#6 irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) 
(__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 cb_lock genl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 
(wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 &type->s_umount_key#46/1 &____s->seqcount#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss &____s->seqcount#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &c->lock irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &____s->seqcount#2 irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &____s->seqcount irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock rcu_node_0 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh 
batched_entropy_u32.lock irq_context: softirq rcu_read_lock &ifibss->incomplete_lock irq_context: softirq rcu_read_lock &rdev->wiphy_work_lock irq_context: softirq rcu_read_lock &local->rx_path_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &local->rx_path_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &pcp->lock &zone->lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock kernfs_notify_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock kernfs_notify_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock kernfs_notify_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock kernfs_notify_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 cb_lock genl_mutex rfkill_global_mutex rcu_node_0 irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock rcu_read_lock &sta->rate_ctrl_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock rcu_read_lock &sta->rate_ctrl_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock rcu_read_lock &sta->rate_ctrl_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock rcu_read_lock &sta->rate_ctrl_lock krc.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override &c->lock 
irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rcu_read_lock &pool->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy10 irq_context: 0 (wq_completion)phy10 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy10 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx batched_entropy_u8.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx kfence_freelist_lock irq_context: 0 nf_nat_proto_mutex nf_hook_mutex per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 nf_nat_proto_mutex nf_hook_mutex stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 &xt[i].mutex &rq->__lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rtnl_mutex _xmit_ETHER &local->filter_lock &c->lock irq_context: 0 rtnl_mutex _xmit_ETHER &local->filter_lock &____s->seqcount#2 irq_context: 0 rtnl_mutex _xmit_ETHER &local->filter_lock &____s->seqcount irq_context: 0 cb_lock &p->pi_lock 
&cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex batched_entropy_u32.lock crngs.lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cb_lock genl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pcpu_alloc_mutex fs_reclaim irq_context: 0 pcpu_alloc_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pcpu_alloc_mutex &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy11 irq_context: 0 (wq_completion)phy11 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy11 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx remove_cache_srcu &rq->__lock irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 &____s->seqcount#2 irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 &____s->seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex lock kernfs_idr_lock &c->lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex lock kernfs_idr_lock &____s->seqcount#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->chanctx_mtx rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->chanctx_mtx rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &sta->lock &____s->seqcount#2 irq_context: 0 (wq_completion)phy12 irq_context: 0 (wq_completion)phy12 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy12 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &____s->seqcount#2 irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &____s->seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss &n->list_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &pcp->lock &zone->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 kn->active#55 &____s->seqcount#2 irq_context: 0 kn->active#55 &____s->seqcount irq_context: 0 (wq_completion)phy13 irq_context: 0 (wq_completion)phy13 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy13 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh key#20 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &entry->crc_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nf_nat_proto_mutex &obj_hash[i].lock pool_lock irq_context: softirq (&dsp_spl_tl) dsp_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&dsp_spl_tl) dsp_lock fill_pool_map-wait-type-override pool_lock irq_context: hardirq hrtimer_bases.lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 &c->lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 &____s->seqcount#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 &____s->seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 kn->active#54 &c->lock irq_context: 0 kn->active#54 &____s->seqcount#2 irq_context: 0 kn->active#54 &n->list_lock irq_context: 0 kn->active#54 &n->list_lock &c->lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &c->lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &____s->seqcount#2 irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &____s->seqcount irq_context: 0 &kernfs_locks->open_file_mutex[count] fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 kn->active#56 &c->lock irq_context: 0 kn->active#56 &____s->seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nf_hook_mutex &rq->__lock irq_context: 0 nf_hook_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex &c->lock irq_context: 0 cb_lock genl_mutex 
cpu_hotplug_lock wq_pool_mutex &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex lock kernfs_idr_lock &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex lock kernfs_idr_lock &____s->seqcount#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 l2tp_ip6_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sem->wait_lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &p->pi_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem &sem->wait_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu quarantine_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu &n->list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem remove_cache_srcu irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#11 
&type->i_mutex_dir_key#7 &root->kernfs_rwsem remove_cache_srcu &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem remove_cache_srcu &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &ndev->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 l2tp_ip6_lock irq_context: 0 nf_hook_mutex rcu_read_lock pool_lock#2 irq_context: 0 nf_hook_mutex &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)phy14 irq_context: 0 (wq_completion)phy14 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy14 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 &xt[i].mutex &lock->wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 &____s->seqcount#2 irq_context: 0 sk_lock-AF_RXRPC irq_context: 0 sk_lock-AF_RXRPC slock-AF_RXRPC irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex fs_reclaim irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex pool_lock#2 irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &rq->__lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &obj_hash[i].lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex crngs.lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex stock_lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &sb->s_type->i_lock_key#8 irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &dir->lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET &____s->seqcount#8 irq_context: 0 sk_lock-AF_RXRPC 
&rxnet->local_mutex k-sk_lock-AF_INET batched_entropy_u32.lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET &table->hash[i].lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET &table->hash[i].lock k-clock-AF_INET irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET &table->hash[i].lock &table->hash2[i].lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex k-slock-AF_INET irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex cpu_hotplug_lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &c->lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &____s->seqcount#2 irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &____s->seqcount irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex kthread_create_lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &x->wait irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &____s->seqcount#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &____s->seqcount irq_context: 0 kn->active#53 &____s->seqcount#2 irq_context: 0 kn->active#53 &n->list_lock irq_context: 0 kn->active#53 &n->list_lock &c->lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &x->wait#22 irq_context: 0 slock-AF_RXRPC irq_context: 0 &mm->mmap_lock &xa->xa_lock#7 &n->list_lock irq_context: 0 &mm->mmap_lock &xa->xa_lock#7 &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &____s->seqcount irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &obj_hash[i].lock pool_lock irq_context: 0 ebt_mutex &c->lock irq_context: 0 ebt_mutex &____s->seqcount#2 irq_context: 0 ebt_mutex &____s->seqcount irq_context: 0 rcu_read_lock rcu_read_lock &p->alloc_lock irq_context: 0 &sig->cred_guard_mutex &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &____s->seqcount#2 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &____s->seqcount#2 irq_context: 0 &group->mark_mutex &____s->seqcount#2 irq_context: 0 &mm->mmap_lock &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &rq->__lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem 
&ei->i_es_lock &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 &xt[i].mutex &n->list_lock &c->lock irq_context: 0 nf_sockopt_mutex &rq->__lock irq_context: 0 nf_sockopt_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 hrtimer_bases.lock fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 hrtimer_bases.lock fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_unbound connector_reaper_work fill_pool_map-wait-type-override pool_lock irq_context: 0 &u->iolock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &____s->seqcount#2 irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock irq_context: 0 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock irq_context: 0 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock 
&dccp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock &c->lock irq_context: 0 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock clock-AF_INET6 irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) &ndev->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&ndev->rs_timer) &ndev->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 &dccp_hashinfo.bhash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 &dccp_hashinfo.bhash[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 &dccp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 &dccp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 elock-AF_INET6 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock krc.lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock 
krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &n->list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &meta->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&w->w) nfc_devlist_mutex &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex quarantine_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx quarantine_lock irq_context: 0 sb_writers#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &fsnotify_mark_srcu pgd_lock irq_context: 0 &fsnotify_mark_srcu rcu_read_lock pool_lock#2 irq_context: 0 &fsnotify_mark_srcu &obj_hash[i].lock irq_context: 0 &fsnotify_mark_srcu key irq_context: 0 &fsnotify_mark_srcu pcpu_lock irq_context: 0 &fsnotify_mark_srcu percpu_counters_lock irq_context: 0 &fsnotify_mark_srcu &cfs_rq->removed.lock irq_context: 0 sb_writers#8 tomoyo_ss &rq->__lock irq_context: 0 sb_writers#8 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_RXRPC irq_context: 0 &sb->s_type->i_mutex_key#10 (wq_completion)krxrpcd irq_context: 0 &sb->s_type->i_mutex_key#10 &wq->mutex irq_context: 0 &sb->s_type->i_mutex_key#10 &wq->mutex &pool->lock/1 irq_context: 0 &sb->s_type->i_mutex_key#10 &wq->mutex &x->wait#10 irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_RXRPC irq_context: 0 &sb->s_type->i_mutex_key#10 &p->pi_lock irq_context: 0 
&sb->s_type->i_mutex_key#10 &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &x->wait irq_context: 0 &rxnet->local_mutex irq_context: 0 (&local->client_conn_reap_timer) irq_context: 0 &rxnet->conn_lock irq_context: 0 &table->hash[i].lock irq_context: 0 &table->hash[i].lock &table->hash2[i].lock irq_context: 0 k-clock-AF_INET irq_context: 0 &list->lock#20 irq_context: 0 &p->alloc_lock &x->wait &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &n->list_lock irq_context: 0 &p->alloc_lock &x->wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &n->list_lock &c->lock irq_context: 0 &p->alloc_lock &x->wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_es_lock key#2 irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &fsnotify_mark_srcu &rq->__lock irq_context: 0 sb_writers#4 &fsnotify_mark_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_callback rlock-AF_RXRPC irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 fs_reclaim irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &dentry->d_lock &wq#2 irq_context: 0 sk_lock-AF_ROSE irq_context: 0 sk_lock-AF_ROSE slock-AF_ROSE irq_context: 0 sk_lock-AF_ROSE 
rose_node_list_lock irq_context: 0 slock-AF_ROSE irq_context: 0 &f->f_pos_lock sb_writers#4 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->xattr_sem irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_raw_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &wb->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 stock_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#7 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 lock#4 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mapping->private_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &memcg->move_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &xa->xa_lock#7 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 
&xa->xa_lock#7 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#7 &xa->xa_lock#3 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#7 &xa->xa_lock#3 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#7 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#7 stock_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 lock#4 &lruvec->lru_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &dev->tx_global_lock irq_context: 0 rtnl_mutex &dev->tx_global_lock &nr_netdev_xmit_lock_key irq_context: 0 rtnl_mutex &dev->tx_global_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &sch->q.lock irq_context: 0 rtnl_mutex __ip_vs_mutex irq_context: 0 rtnl_mutex __ip_vs_mutex &ipvs->dest_trash_lock irq_context: 0 rtnl_mutex netlbl_unlhsh_lock irq_context: 0 rtnl_mutex fib_info_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex fib_info_lock pool_lock#2 irq_context: 0 rtnl_mutex flowtable_lock irq_context: 0 rtnl_mutex flowtable_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex &nf_conntrack_locks[i] irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex &rq->__lock irq_context: 0 rtnl_mutex &ul->lock#2 irq_context: 0 rtnl_mutex nr_list_lock irq_context: 0 rtnl_mutex nr_neigh_list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &xa->xa_lock#7 key#10 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_es_lock key#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_ROSE irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ROSE irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ROSE slock-AF_ROSE irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_ROSE irq_context: 0 &sb->s_type->i_mutex_key#10 wlock-AF_ROSE irq_context: 0 &sb->s_type->i_mutex_key#10 &list->lock#21 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ROSE rose_list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ROSE &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ROSE wlock-AF_ROSE irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ROSE &list->lock#21 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ROSE rlock-AF_ROSE irq_context: 0 &sighand->siglock &p->pi_lock &rq->__lock &cfs_rq->removed.lock 
irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &(ei->i_block_reservation_lock) key#15 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal &n->list_lock irq_context: 0 sb_writers#4 sb_internal &n->list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle lock#4 &lruvec->lru_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock sb_writers#4 mount_lock irq_context: 0 &mm->mmap_lock sb_writers#4 tk_core.seq.seqcount irq_context: 0 &mm->mmap_lock sb_writers#4 mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock sb_writers#4 pool_lock#2 irq_context: 0 &mm->mmap_lock sb_writers#4 &journal->j_state_lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &ei->i_raw_lock irq_context: 0 &mm->mmap_lock sb_writers#4 &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &n->list_lock &c->lock irq_context: 0 sb_writers#8 tomoyo_ss &n->list_lock irq_context: 0 sb_writers#8 tomoyo_ss &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock sb_pagefaults irq_context: 0 
&mm->mmap_lock sb_pagefaults tk_core.seq.seqcount irq_context: 0 &mm->mmap_lock sb_pagefaults mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock sb_pagefaults pool_lock#2 irq_context: 0 &mm->mmap_lock sb_pagefaults &journal->j_state_lock irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &ei->i_raw_lock irq_context: 0 &mm->mmap_lock sb_pagefaults &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &mapping->private_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 &mm->mmap_lock &mapping->private_lock irq_context: 0 &mm->mmap_lock &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 sk_lock-AF_INET6 stock_lock irq_context: 0 sk_lock-AF_INET6 mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET6 &sb->s_type->i_lock_key#8 irq_context: 0 sk_lock-AF_INET6 &dir->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 pool_lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &dir->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 fs_reclaim irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &c->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &____s->seqcount irq_context: 0 sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 k-clock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 crngs.lock irq_context: 0 sk_lock-AF_INET6 &token_hash[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock stock_lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &____s->seqcount#8 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock once_lock irq_context: 0 sk_lock-AF_INET6 
k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock once_lock crngs.lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &hashinfo->ehash_locks[i] irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &rq->__lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 batched_entropy_u32.lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 fs_reclaim irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &c->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 pool_lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 
k-sk_lock-AF_INET6 &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &base->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 irq_context: 0 sk_lock-AF_INET6 &ei->socket.wq.wait irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &ndev->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &n->list_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &fq->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->active_txq_lock[i] irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->active_txq_lock[i] irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->queue_stop_reason_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &list->lock#19 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &list->lock#19 irq_context: 0 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &rq->__lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 pgd_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock pool_lock#2 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 key irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 pcpu_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 percpu_counters_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 pcpu_lock stock_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle key#4 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &meta_group_info[i]->alloc_sem &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &meta_group_info[i]->alloc_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock &table->lock#3 irq_context: softirq rcu_read_lock rcu_read_lock &table->lock#3 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &table->lock#3 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &____s->seqcount#2 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &vma->vm_lock->lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &vma->vm_lock->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rcu_state.gp_wq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 
&type->i_mutex_dir_key#3/1 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex stock_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &____s->seqcount#2 irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &____s->seqcount irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 namespace_sem stock_lock irq_context: 0 namespace_sem &n->list_lock irq_context: 0 namespace_sem batched_entropy_u8.lock irq_context: 0 namespace_sem kfence_freelist_lock irq_context: 0 namespace_sem &rq->__lock irq_context: 0 namespace_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 namespace_sem &____s->seqcount#2 irq_context: 0 &type->s_umount_key#23/1 &____s->seqcount#2 irq_context: 0 &type->s_umount_key#23/1 &xa->xa_lock#3 irq_context: 0 &type->s_umount_key#23/1 &xa->xa_lock#3 pool_lock#2 irq_context: 0 &type->s_umount_key#23/1 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#23/1 stock_lock irq_context: 0 &type->s_umount_key#23/1 &rq->__lock irq_context: 0 &type->s_umount_key#23/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem stock_lock irq_context: 0 pernet_ops_rwsem &____s->seqcount#2 irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &sem->wait_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &sem->wait_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_hook_mutex stock_lock irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem ipvs->est_mutex &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem ipvs->est_mutex &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &tfile->socket.wq.wait irq_context: 0 purge_vmap_area_lock &____s->seqcount irq_context: 0 purge_vmap_area_lock rcu_read_lock pool_lock#2 irq_context: 0 &group->mark_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 lock link_idr_lock irq_context: 0 lock link_idr_lock pool_lock#2 irq_context: 0 tracepoints_mutex irq_context: 0 tracepoints_mutex fs_reclaim irq_context: 0 tracepoints_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 tracepoints_mutex pool_lock#2 irq_context: 0 tracepoints_mutex cpu_hotplug_lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex &rq->__lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex irq_context: 0 ppp_mutex &mm->mmap_lock irq_context: 0 ppp_mutex fs_reclaim irq_context: 0 ppp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 ppp_mutex stock_lock irq_context: 0 ppp_mutex &c->lock irq_context: 0 ppp_mutex &n->list_lock irq_context: 0 ppp_mutex pool_lock#2 irq_context: 0 ppp_mutex 
stack_depot_init_mutex irq_context: 0 ppp_mutex rtnl_mutex irq_context: 0 ppp_mutex rtnl_mutex &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex pcpu_alloc_mutex irq_context: 0 ppp_mutex rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 ppp_mutex rtnl_mutex &pn->all_ppp_mutex irq_context: 0 ppp_mutex rtnl_mutex &pn->all_ppp_mutex fs_reclaim irq_context: 0 ppp_mutex rtnl_mutex &pn->all_ppp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 ppp_mutex rtnl_mutex &pn->all_ppp_mutex pool_lock#2 irq_context: 0 ppp_mutex rtnl_mutex fs_reclaim irq_context: 0 ppp_mutex rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 ppp_mutex rtnl_mutex pool_lock#2 irq_context: 0 ppp_mutex rtnl_mutex net_rwsem irq_context: 0 ppp_mutex rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 ppp_mutex rtnl_mutex &tn->lock irq_context: 0 ppp_mutex rtnl_mutex &x->wait#9 irq_context: 0 ppp_mutex rtnl_mutex &obj_hash[i].lock irq_context: 0 ppp_mutex rtnl_mutex &c->lock irq_context: 0 ppp_mutex rtnl_mutex &____s->seqcount#2 irq_context: 0 ppp_mutex rtnl_mutex &n->list_lock irq_context: 0 ppp_mutex rtnl_mutex &n->list_lock &c->lock irq_context: 0 ppp_mutex rtnl_mutex &k->list_lock irq_context: 0 ppp_mutex rtnl_mutex gdp_mutex irq_context: 0 ppp_mutex rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 ppp_mutex rtnl_mutex lock irq_context: 0 ppp_mutex rtnl_mutex lock kernfs_idr_lock irq_context: 0 ppp_mutex rtnl_mutex &root->kernfs_rwsem irq_context: 0 ppp_mutex rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 ppp_mutex rtnl_mutex bus_type_sem irq_context: 0 ppp_mutex rtnl_mutex sysfs_symlink_target_lock irq_context: 0 ppp_mutex rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 ppp_mutex rtnl_mutex &root->kernfs_rwsem irq_context: 0 ppp_mutex rtnl_mutex &dev->power.lock irq_context: 0 ppp_mutex rtnl_mutex dpm_list_mtx irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 ppp_mutex rtnl_mutex subsys mutex#17 irq_context: 0 ppp_mutex rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 ppp_mutex rtnl_mutex &dir->lock#2 irq_context: 0 ppp_mutex rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 tracepoints_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 tracepoints_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 link_idr_lock irq_context: 0 alg_types_sem irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex remove_cache_srcu &c->lock irq_context: 0 &xt[i].mutex remove_cache_srcu &n->list_lock irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &xt[i].mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 
ppp_mutex rtnl_mutex &____s->seqcount irq_context: 0 ppp_mutex rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 ppp_mutex rtnl_mutex dev_hotplug_mutex irq_context: 0 ppp_mutex rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 ppp_mutex rtnl_mutex dev_base_lock irq_context: 0 ppp_mutex rtnl_mutex input_pool.lock irq_context: 0 ppp_mutex rtnl_mutex batched_entropy_u32.lock irq_context: 0 ppp_mutex rtnl_mutex &tbl->lock irq_context: 0 ppp_mutex rtnl_mutex stock_lock irq_context: 0 ppp_mutex rtnl_mutex sysctl_lock irq_context: 0 ppp_mutex rtnl_mutex nl_table_lock irq_context: 0 ppp_mutex rtnl_mutex nl_table_wait.lock irq_context: 0 ppp_mutex rtnl_mutex proc_subdir_lock irq_context: 0 ppp_mutex rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 ppp_mutex rtnl_mutex proc_subdir_lock irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 ppp_mutex rtnl_mutex &pnettable->lock irq_context: 0 ppp_mutex rtnl_mutex smc_ib_devices.mutex irq_context: 0 ppp_mutex rtnl_mutex &ppp->rlock irq_context: 0 ppp_mutex rtnl_mutex &ppp->wlock irq_context: 0 ppp_mutex rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 ppp_mutex rtnl_mutex.wait_lock irq_context: 0 ppp_mutex &p->pi_lock irq_context: 0 ppp_mutex &p->pi_lock &rq->__lock irq_context: 0 ppp_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_ALG irq_context: 0 sk_lock-AF_ALG slock-AF_ALG irq_context: 0 slock-AF_ALG irq_context: 0 ppp_mutex &pn->all_ppp_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex stock_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &____s->seqcount#2 irq_context: 0 &iint->mutex &____s->seqcount#2 irq_context: 0 rtnl_mutex cpu_hotplug_lock &list->lock#5 irq_context: 0 &u->iolock &u->peer_wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex bpf_devs_lock irq_context: 0 rtnl_mutex &hwstats->hwsdev_list_lock irq_context: 0 rtnl_mutex &ul->lock irq_context: 0 rtnl_mutex &net->xdp.lock irq_context: 0 rtnl_mutex mirred_list_lock irq_context: 0 rtnl_mutex &idev->mc_query_lock irq_context: 0 rtnl_mutex &idev->mc_query_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex (work_completion)(&(&idev->mc_report_work)->work) irq_context: 0 rtnl_mutex &idev->mc_report_lock irq_context: 0 rtnl_mutex &idev->mc_lock krc.lock 
irq_context: 0 rtnl_mutex &pnn->pndevs.lock irq_context: 0 rtnl_mutex &pnn->routes.lock irq_context: 0 rtnl_mutex &ppp->rlock irq_context: 0 rtnl_mutex &ppp->wlock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#4 irq_context: 0 rtnl_mutex &ppp->wlock &ppp->rlock irq_context: 0 rtnl_mutex &pn->all_ppp_mutex irq_context: 0 rtnl_mutex &pn->all_ppp_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex &pn->all_ppp_mutex pool_lock#2 irq_context: 0 rtnl_mutex &pf->rwait irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx &rq->__lock irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 rtnl_mutex subsys mutex#17 &k->k_lock klist_remove_lock irq_context: 0 rtnl_mutex deferred_probe_mutex irq_context: 0 rtnl_mutex device_links_lock irq_context: 0 rtnl_mutex uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: hardirq rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 rcu_state.barrier_mutex &rq->__lock irq_context: 0 rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &vma->vm_lock->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_callback pcpu_lock stock_lock irq_context: softirq rcu_callback &x->wait#24 irq_context: softirq rcu_callback &x->wait#24 &p->pi_lock irq_context: softirq rcu_callback &x->wait#24 &p->pi_lock &rq->__lock irq_context: softirq rcu_callback &x->wait#24 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dev_base_lock irq_context: 0 lweventlist_lock irq_context: 0 netdev_unregistering_wq.lock irq_context: 0 &ppp->wlock irq_context: 0 &ppp->wlock &ppp->rlock irq_context: 0 &list->lock#22 irq_context: 0 krc.lock irq_context: 0 &dir->lock#2 irq_context: 0 &dir->lock#2 &obj_hash[i].lock irq_context: 0 &dir->lock#2 pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) &ndev->lock batched_entropy_u32.lock crngs.lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_prealloc_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_list_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 integrity_iint_lock irq_context: 0 sb_writers#4 sb_internal &____s->seqcount#2 irq_context: 0 sb_writers#4 sb_internal &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#17 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &r->producer_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx stock_lock irq_context: 0 &ep->mtx wakeup_ida.xa_lock irq_context: 0 &ep->mtx &x->wait#9 irq_context: 0 &ep->mtx &k->list_lock irq_context: 0 &ep->mtx gdp_mutex irq_context: 0 &ep->mtx gdp_mutex &k->list_lock irq_context: 0 &ep->mtx gdp_mutex fs_reclaim irq_context: 0 &ep->mtx gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &ep->mtx gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &ep->mtx gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx gdp_mutex pool_lock#2 irq_context: 0 &ep->mtx gdp_mutex lock irq_context: 0 &ep->mtx gdp_mutex lock kernfs_idr_lock irq_context: 0 &ep->mtx gdp_mutex &root->kernfs_rwsem irq_context: 0 &ep->mtx gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &ep->mtx lock irq_context: 0 &ep->mtx lock kernfs_idr_lock irq_context: 0 &ep->mtx &root->kernfs_rwsem irq_context: 0 &ep->mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &ep->mtx bus_type_sem irq_context: 0 &ep->mtx sysfs_symlink_target_lock irq_context: 0 &ep->mtx uevent_sock_mutex irq_context: 0 &ep->mtx uevent_sock_mutex fs_reclaim irq_context: 0 &ep->mtx uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &ep->mtx uevent_sock_mutex pool_lock#2 irq_context: 0 &ep->mtx uevent_sock_mutex nl_table_lock irq_context: 0 &ep->mtx uevent_sock_mutex &rq->__lock irq_context: 0 &ep->mtx uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
&ep->mtx uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 rcu_node_0 irq_context: 0 sb_writers#5 rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx uevent_sock_mutex nl_table_wait.lock irq_context: 0 &ep->mtx uevent_sock_mutex &obj_hash[i].lock irq_context: 0 &ep->mtx uevent_sock_mutex &base->lock irq_context: 0 &ep->mtx uevent_sock_mutex &base->lock &obj_hash[i].lock irq_context: 0 &ep->mtx subsys mutex#15 irq_context: 0 &ep->mtx subsys mutex#15 &k->k_lock irq_context: 0 &ep->mtx events_lock irq_context: 0 &ep->mtx &dentry->d_lock irq_context: 0 &ep->mtx &n->list_lock irq_context: 0 &ep->mtx &n->list_lock &c->lock irq_context: 0 &ep->mtx uevent_sock_mutex &c->lock irq_context: 0 &ep->mtx &u->lock irq_context: 0 &ep->mtx &u->lock &u->peer_wait irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->lock irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock tk_core.seq.seqcount irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->poll_wait irq_context: 0 &ep->mtx &ws->lock irq_context: 0 &ep->mtx &ws->lock tk_core.seq.seqcount irq_context: 0 &ep->mtx &ws->lock &obj_hash[i].lock irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->poll_wait/1 irq_context: 0 &ep->mtx &ep->lock &ws->lock irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->poll_wait/1 &p->pi_lock irq_context: 0 &ep->mtx &ep->lock &ws->lock tk_core.seq.seqcount irq_context: 0 &ep->mtx &ep->lock &ws->lock &obj_hash[i].lock irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->poll_wait/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->poll_wait/1 &p->pi_lock &rq->__lock irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->poll_wait/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &rq->__lock irq_context: 0 pernet_ops_rwsem 
k-sk_lock-AF_TIPC &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex stock_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex stock_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex stock_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex stock_lock irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->poll_wait/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &new_node->seq_out_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock pool_lock#2 irq_context: 0 link_idr_lock &obj_hash[i].lock irq_context: 0 link_idr_lock pool_lock#2 irq_context: 0 tracepoints_mutex tracepoint_srcu_srcu_usage.lock irq_context: 0 tracepoints_mutex tracepoint_srcu_srcu_usage.lock &obj_hash[i].lock irq_context: 0 tracepoints_mutex tracepoint_srcu_srcu_usage.lock &obj_hash[i].lock pool_lock irq_context: 0 tracepoints_mutex &rq->__lock irq_context: 0 tracepoints_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &n->list_lock &c->lock irq_context: 0 tracepoints_mutex tracepoint_srcu_srcu_usage.lock &ACCESS_PRIVATE(sdp, lock) irq_context: 0 tracepoints_mutex tracepoint_srcu_srcu_usage.lock &base->lock irq_context: 0 tracepoints_mutex tracepoint_srcu_srcu_usage.lock &base->lock &obj_hash[i].lock irq_context: 0 tracepoints_mutex &obj_hash[i].lock irq_context: 0 rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mq_lock irq_context: 0 (wq_completion)events free_ipc_work irq_context: 0 (wq_completion)events free_ipc_work 
rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &pool->lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events free_ipc_work &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work rcu_read_lock mount_lock irq_context: 0 (wq_completion)events free_ipc_work rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)events free_ipc_work mount_lock irq_context: 0 (wq_completion)events free_ipc_work mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)events free_ipc_work &fsnotify_mark_srcu irq_context: 0 (wq_completion)events free_ipc_work rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 shrinker_rwsem irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 rename_lock.seqcount irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 &dentry->d_lock irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 &sb->s_type->i_lock_key#20 irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 &s->s_inode_list_lock irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 &xa->xa_lock#7 irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 pool_lock#2 irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 &fsnotify_mark_srcu irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 sb_lock irq_context: 0 (wq_completion)events free_ipc_work unnamed_dev_ida.xa_lock irq_context: 0 (wq_completion)events free_ipc_work list_lrus_mutex irq_context: 0 (wq_completion)events free_ipc_work &xa->xa_lock#3 irq_context: 0 (wq_completion)events free_ipc_work &xa->xa_lock#3 &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work &xa->xa_lock#3 pool_lock#2 irq_context: 0 (wq_completion)events free_ipc_work pool_lock#2 irq_context: 0 (wq_completion)events free_ipc_work sb_lock irq_context: 0 (wq_completion)events free_ipc_work sb_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work sb_lock pool_lock#2 irq_context: 0 (wq_completion)events free_ipc_work mnt_id_ida.xa_lock irq_context: 0 (wq_completion)events free_ipc_work &ids->rwsem irq_context: 0 (wq_completion)events free_ipc_work (work_completion)(&ht->run_work) irq_context: 0 
(wq_completion)events free_ipc_work &ht->mutex irq_context: 0 (wq_completion)events free_ipc_work &ht->mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work &ht->mutex pool_lock#2 irq_context: 0 (wq_completion)events free_ipc_work percpu_counters_lock irq_context: 0 (wq_completion)events free_ipc_work pcpu_lock irq_context: 0 (wq_completion)events free_ipc_work sysctl_lock irq_context: 0 (wq_completion)events free_ipc_work sysctl_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work sysctl_lock pool_lock#2 irq_context: 0 (wq_completion)events free_ipc_work sysctl_lock krc.lock irq_context: 0 (wq_completion)events free_ipc_work proc_inum_ida.xa_lock irq_context: 0 (wq_completion)events free_ipc_work stock_lock irq_context: 0 (wq_completion)netns irq_context: 0 (wq_completion)netns net_cleanup_work irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem net_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->nsid_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &tn->node_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ebt_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &xt[i].mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &nft_net->commit_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_ct_ecache_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem netns_bpf_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (&net->fs_probe_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->cells_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (&net->cells_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock irq_context: 0 
(wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rq->__lock irq_context: 0 (wq_completion)afs irq_context: 0 (wq_completion)afs (work_completion)(&net->cells_manager) irq_context: 0 (wq_completion)afs (work_completion)(&net->cells_manager) &net->cells_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex tracepoint_srcu_srcu_usage.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &ssp->srcu_sup->srcu_cb_mutex tracepoint_srcu_srcu_usage.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex &base->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) tracepoint_srcu_srcu_usage.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (&net->fs_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem bit_wait_table + i irq_context: 0 (wq_completion)afs (work_completion)(&net->fs_manager) irq_context: 0 (wq_completion)afs (work_completion)(&net->fs_manager) &(&net->fs_lock)->lock irq_context: 0 (wq_completion)afs (work_completion)(&net->fs_manager) bit_wait_table + i irq_context: 0 (wq_completion)afs (work_completion)(&net->fs_manager) bit_wait_table + i &p->pi_lock irq_context: 0 (wq_completion)afs (work_completion)(&net->fs_manager) bit_wait_table + i &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)afs (work_completion)(&net->fs_manager) bit_wait_table + i &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC k-slock-AF_RXRPC irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &rx->incoming_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->conn_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work 
pernet_ops_rwsem k-sk_lock-AF_RXRPC &call->waitq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC rcu_read_lock &call->notify_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC (rxrpc_call_limiter).lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &rx->recvmsg_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &rx->call_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->call_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC (&call->timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &list->lock#23 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-slock-AF_RXRPC irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (wq_completion)kafsd irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wq->mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wq->mutex &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wq->mutex &x->wait#10 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-clock-AF_RXRPC irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &local->services_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (wq_completion)krxrpcd irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wq->mutex &pool->lock/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rlock-AF_RXRPC irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &x->wait irq_context: 0 k-clock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &sb->s_type->i_lock_key#8 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &xa->xa_lock#7 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &fsnotify_mark_srcu irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem proc_subdir_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &ent->pde_unload_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem proc_inum_ida.xa_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex (work_completion)(&data->gc_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex cpu_hotplug_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex fs_reclaim irq_context: 0 (wq_completion)netns 
net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &fq->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->active_txq_lock[i] irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->active_txq_lock[i] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex nf_hook_mutex irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->queue_stop_reason_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex nf_hook_mutex cpu_hotplug_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock tk_core.seq.seqcount 
irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &list->lock#19 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &list->lock#19 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex nf_hook_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex nf_hook_mutex cpu_hotplug_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_connlabels_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex net_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&ovs_net->masks_rebalance)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&ovs_net->dp_notify_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &srv->idr_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem wq_pool_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem wq_pool_mutex &wq->mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pool->lock/1 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns 
net_cleanup_work pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 sk_lock-AF_INET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&sdp->delay_work) irq_context: softirq (&sdp->delay_work) rcu_read_lock &pool->lock irq_context: softirq (&sdp->delay_work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)krxrpcd (work_completion)(&rxnet->service_conn_reaper) irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &rnp->exp_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC k-slock-AF_TIPC irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &nt->cluster_scope_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC k-clock-AF_TIPC irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-slock-AF_TIPC irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ptype_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rnp->exp_lock irq_context: 0 
(wq_completion)netns net_cleanup_work pernet_ops_rwsem &rnp->exp_wq[0] irq_context: softirq rcu_callback tracepoint_srcu_srcu_usage.lock &ACCESS_PRIVATE(sdp, lock) irq_context: softirq rcu_callback tracepoint_srcu_srcu_usage.lock &obj_hash[i].lock irq_context: softirq rcu_callback tracepoint_srcu_srcu_usage.lock &base->lock irq_context: softirq rcu_callback tracepoint_srcu_srcu_usage.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) stock_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pcpu_lock stock_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) purge_vmap_area_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_INET rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET stock_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex.wait_lock irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &p->pi_lock irq_context: 0 sk_lock-AF_INET &hashinfo->ehash_locks[i] irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_INET remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dir->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&tn->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &tn->nametbl_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock 
rcu_read_lock &ul->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 sk_lock-AF_INET &ei->socket.wq.wait irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&ht->run_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &ht->mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &ht->mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &ht->mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&c->work)->work) irq_context: 0 (wq_completion)krxrpcd (work_completion)(&rxnet->service_conn_reaper) &rxnet->conn_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem pcpu_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-clock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-slock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (wq_completion)krdsd irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&rtn->rds_tcp_accept_w) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 &h->lhash2[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 &queue->rskq_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 
(wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 elock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &knet->mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rds_tcp_conn_lock irq_context: 0 &knet->mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysctl_lock irq_context: 0 &knet->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysctl_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysctl_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysctl_lock krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem loop_conns_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (wq_completion)l2tp irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex &x->wait#24 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &u->iolock rcu_node_0 irq_context: 0 &u->iolock &rcu_state.expedited_wq irq_context: 0 &u->iolock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &u->iolock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &u->iolock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &sb->s_type->i_lock_key#8 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &xa->xa_lock#7 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &fsnotify_mark_srcu irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &hashinfo->ehash_locks[i] irq_context: 0 
&sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-clock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 krc.lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 elock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &dir->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &token_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &msk->pm.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 elock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 (work_completion)(&msk->work) irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu quarantine_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu &c->lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu &n->list_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 &mapping->private_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock 
irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: softirq (&sdp->delay_work) rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq (&sdp->delay_work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&sdp->delay_work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 kn->active#5 &____s->seqcount#2 irq_context: 0 sb_writers#8 kn->active#5 &____s->seqcount irq_context: 0 sb_writers#8 kn->active#5 &n->list_lock irq_context: 0 &mux->lock irq_context: 0 &mux->rx_lock irq_context: 0 sk_lock-AF_KCM irq_context: 0 sk_lock-AF_KCM slock-AF_KCM irq_context: 0 sk_lock-AF_KCM fs_reclaim irq_context: 0 sk_lock-AF_KCM fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_KCM fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_KCM fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &xa->xa_lock#7 irq_context: 0 sb_writers#4 &mapping->private_lock irq_context: 0 sb_writers#4 &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 sb_writers#4 stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#7 irq_context: 0 sb_writers#4 lock#4 irq_context: 0 sb_writers#4 lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 lock#5 irq_context: 0 sb_writers#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &mapping->private_lock rcu_read_lock rcu_read_lock key#10 irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#7 &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#7 pool_lock#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_es_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &(ei->i_block_reservation_lock) irq_context: 0 sb_writers#4 sb_internal jbd2_handle &(ei->i_block_reservation_lock) key#15 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sb->s_type->i_lock_key#22 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (&rxnet->peer_keepalive_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&rxnet->peer_keepalive_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (&rxnet->service_conn_reap_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &x->wait#10 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rxnet->conn_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)netns 
net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_KCM pool_lock#2 irq_context: 0 sk_lock-AF_KCM &____s->seqcount irq_context: 0 sk_lock-AF_KCM &mm->mmap_lock irq_context: 0 slock-AF_KCM irq_context: 0 sk_lock-AF_KCM &mux->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_KCM irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_KCM slock-AF_KCM irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_KCM clock-AF_KCM irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_KCM &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_KCM pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_KCM irq_context: 0 &sb->s_type->i_mutex_key#10 &mux->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (work_completion)(&kcm->tx_work) irq_context: 0 &sb->s_type->i_mutex_key#10 &mux->rx_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &mux->rx_lock rlock-AF_KCM irq_context: 0 ppp_mutex rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_base_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock &list->lock#5 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &mm->mmap_lock lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#7 &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu irq_context: 0 pernet_ops_rwsem remove_cache_srcu quarantine_lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu &c->lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu &n->list_lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu rcu_read_lock pool_lock#2 
irq_context: 0 pernet_ops_rwsem remove_cache_srcu &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem nf_hook_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem nf_hook_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem nf_hook_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_hook_mutex rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 pernet_ops_rwsem remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex bpf_devs_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex net_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &in_dev->mc_tomb_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex stock_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tbl->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dir->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex class irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (&tbl->proxy_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex 
&base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ul->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &net->xdp.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex mirred_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &nft_net->commit_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tn->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ul->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex proc_subdir_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ent->pde_unload_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &net->ipv6.addrconf_hash_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ndev->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ndev->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 &sb->s_type->i_mutex_key#8 &____s->seqcount#2 irq_context: 0 ppp_mutex ppp_mutex.wait_lock irq_context: 0 ppp_mutex &rq->__lock irq_context: 0 ppp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_query_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_query_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (work_completion)(&(&idev->mc_report_work)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_report_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &pnn->pndevs.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &pnn->routes.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &pnettable->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex smc_ib_devices.mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex target_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_NONE irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex 
&root->kernfs_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex kernfs_idr_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &k->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_hotplug_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex subsys mutex#17 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex subsys mutex#17 &k->k_lock klist_remove_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &x->wait#9 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dpm_list_mtx irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev->power.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex deferred_probe_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex device_links_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex gdp_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock xps_map_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 ppp_mutex rtnl_mutex &pn->all_ppp_mutex &c->lock irq_context: 0 ppp_mutex rtnl_mutex lock kernfs_idr_lock &c->lock irq_context: 0 ppp_mutex rtnl_mutex quarantine_lock irq_context: 0 ppp_mutex rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 ppp_mutex rtnl_mutex &pcp->lock &zone->lock irq_context: 0 ppp_mutex rtnl_mutex &pcp->lock &zone->lock 
&____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem pool_lock#2 irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 &____s->seqcount irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem dev_base_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem lweventlist_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem napi_hash_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem fs_reclaim irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 ppp_mutex rtnl_mutex batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem stock_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dir->lock#2 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dir->lock#2 pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dir->lock#2 quarantine_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem netdev_unregistering_wq.lock irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock remove_cache_srcu irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &sem->wait_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &journal->j_wait_updates irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock fs_reclaim 
mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock stock_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page) irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 key irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_wait_updates irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_TUNNEL6 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#7 &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#7 &____s->seqcount irq_context: 0 (wq_completion)events_unbound (reaper_work).work rcu_node_0 irq_context: 0 (wq_completion)events_unbound (reaper_work).work &rcu_state.expedited_wq irq_context: 0 (wq_completion)events_unbound (reaper_work).work &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex.wait_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock &meta->lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock mount_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &p->alloc_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &list->lock irq_context: 0 
&sb->s_type->i_mutex_key#8 kauditd_wait.lock irq_context: 0 &sb->s_type->i_mutex_key#8 kauditd_wait.lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 kauditd_wait.lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 kauditd_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_callback &x->wait#24 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 sb_writers#4 sb_internal &rq->__lock irq_context: 0 sb_writers#4 sb_internal &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->tvlv.container_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->tvlv.container_list_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex uevent_sock_mutex fs_reclaim &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem krc.lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &tn->node_list_lock irq_context: 0 pernet_ops_rwsem ebt_mutex irq_context: 0 pernet_ops_rwsem &xt[i].mutex irq_context: 0 pernet_ops_rwsem &nft_net->commit_mutex irq_context: 0 pernet_ops_rwsem netns_bpf_mutex irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex rcu_node_0 irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rnp->exp_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_node_0 irq_context: 0 
(wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_SIT irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_state.barrier_mutex rcu_state.barrier_mutex.wait_lock irq_context: 0 pernet_ops_rwsem &hn->hn_lock irq_context: 0 pernet_ops_rwsem sysctl_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem sysctl_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem sysctl_lock krc.lock irq_context: 0 pernet_ops_rwsem sysctl_lock &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)inet_frag_wq irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) (work_completion)(&ht->run_work) irq_context: 0 
(wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) &ht->mutex irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) &ht->mutex &obj_hash[i].lock irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) &ht->mutex pool_lock#2 irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events fqdir_free_work irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex rcu_state.barrier_mutex.wait_lock irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex &pool->lock irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex &rq->__lock irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &this->info_list_lock irq_context: 0 pernet_ops_rwsem &pnettable->lock irq_context: 0 pernet_ops_rwsem &pnetids_ndev->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6/1 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6/1 k-slock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6/1 rlock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6/1 &list->lock#24 irq_context: 0 pernet_ops_rwsem &net->sctp.addr_wq_lock irq_context: 0 pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 irq_context: 0 pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 &sctp_ep_hashtable[i].lock irq_context: 0 pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 pool_lock#2 irq_context: 0 pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem &xa->xa_lock#7 irq_context: 0 pernet_ops_rwsem &fsnotify_mark_srcu irq_context: 0 pernet_ops_rwsem &net->sctp.addr_wq_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &ent->pde_unload_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-slock-AF_INET6 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-slock-AF_INET6 pool_lock#2 irq_context: 0 pernet_ops_rwsem k-slock-AF_INET6 elock-AF_INET6 irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work 
pernet_ops_rwsem &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock &ei->i_raw_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex rcu_state.barrier_mutex.wait_lock irq_context: 0 sb_writers#4 sb_internal &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_state.barrier_mutex.wait_lock irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex.wait_lock irq_context: 0 (wq_completion)events fqdir_free_work &p->pi_lock irq_context: 0 (wq_completion)events fqdir_free_work &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events fqdir_free_work &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events fqdir_free_work &obj_hash[i].lock irq_context: 0 (wq_completion)events fqdir_free_work pool_lock#2 irq_context: 0 pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &ht->mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_TUNNEL irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu quarantine_lock irq_context: 0 nl_table_wait.lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &knet->mutex irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock irq_context: 0 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->lock rcu_read_lock 
&ws->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->poll_wait/1 irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &ep->mtx &ACCESS_PRIVATE(sdp, lock) irq_context: 0 &ep->mtx rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex pool_lock#2 irq_context: 0 &ep->mtx &cfs_rq->removed.lock irq_context: 0 &ep->mtx wakeup_srcu irq_context: 0 &ep->mtx wakeup_srcu_srcu_usage.lock &ACCESS_PRIVATE(sdp, lock) irq_context: 0 &ep->mtx wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock irq_context: 0 &ep->mtx wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &ep->mtx wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &ep->mtx wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &ep->mtx wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx &x->wait#3 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) rcu_node_0 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &x->wait#3 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &ep->mtx (&ws->timer) irq_context: 0 &ep->mtx &base->lock irq_context: 0 &ep->mtx &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 &ep->mtx &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 &ep->mtx &root->kernfs_rwsem &rq->__lock irq_context: 0 &ep->mtx &root->kernfs_rwsem pool_lock#2 irq_context: 0 &ep->mtx subsys mutex#15 &k->k_lock klist_remove_lock irq_context: 0 &ep->mtx deferred_probe_mutex irq_context: 0 &ep->mtx device_links_lock irq_context: 0 &ep->mtx mmu_notifier_invalidate_range_start irq_context: 0 &ep->mtx uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 &ep->mtx deleted_ws.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 rtnl_mutex &tun->lock irq_context: 0 rtnl_mutex wlock-AF_UNSPEC irq_context: 0 rtnl_mutex elock-AF_UNSPEC irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER krc.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sem->wait_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 kn->active#5 &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_ETHER irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 sb_writers#8 kn->active#5 &rq->__lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &n->list_lock &c->lock irq_context: 0 lweventlist_lock pool_lock#2 irq_context: 0 lweventlist_lock &dir->lock#2 irq_context: 0 &tun->lock irq_context: 0 events_lock irq_context: 0 wakeup_srcu irq_context: 0 wakeup_srcu_srcu_usage.lock &ACCESS_PRIVATE(sdp, lock) irq_context: 0 wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock irq_context: 0 wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh 
&obj_hash[i].lock irq_context: 0 (&ws->timer) irq_context: 0 subsys mutex#15 irq_context: 0 subsys mutex#15 &k->k_lock irq_context: 0 subsys mutex#15 &k->k_lock klist_remove_lock irq_context: 0 uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 uevent_sock_mutex pool_lock#2 irq_context: 0 uevent_sock_mutex nl_table_lock irq_context: 0 uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 uevent_sock_mutex nl_table_wait.lock irq_context: 0 gdp_mutex sysfs_symlink_target_lock irq_context: 0 gdp_mutex &obj_hash[i].lock irq_context: 0 &ws->lock irq_context: 0 deleted_ws.lock irq_context: 0 wakeup_ida.xa_lock irq_context: 0 &sb->s_type->i_mutex_key#8 irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 irq_context: 0 &sb->s_type->i_mutex_key#8 &wb->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &xa->xa_lock#7 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#4 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#4 &lruvec->lru_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#5 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle lock#4 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle lock#5 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem 
&sbi->s_md_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &ei->i_prealloc_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &pa->pa_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem ptlock_ptr(page)#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->private_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &memcg->move_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#7 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#7 &s->s_inode_wblist_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock pcpu_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock percpu_counters_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock &blkcg->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock &blkcg->lock pool_lock#2 irq_context: 0 
&sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_raw_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &dd->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &base->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &dd->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &folio_wait_table[i] irq_context: 0 &sb->s_type->i_mutex_key#8 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 lock#4 irq_context: 0 &sb->s_type->i_mutex_key#8 lock#5 irq_context: 0 &sb->s_type->i_mutex_key#8 &ei->i_es_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &journal->j_state_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &dd->lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &dd->lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &base->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 (&timer.timer) irq_context: 0 rcu_read_lock rcu_read_lock_bh &zone->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh &zone->lock &____s->seqcount irq_context: 0 sock_diag_mutex irq_context: 0 sock_diag_mutex sock_diag_table_mutex irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG fs_reclaim irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG pool_lock#2 irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG &c->lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex clock-AF_INET6 irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex slock-AF_INET6 irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex sk_lock-AF_INET6 irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG 
inet_diag_table_mutex sk_lock-AF_INET6 slock-AF_INET6 irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex &rq->__lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sock_diag_mutex sock_diag_table_mutex rcu_read_lock pool_lock#2 irq_context: 0 sock_diag_mutex sock_diag_table_mutex rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 sock_diag_mutex sock_diag_table_mutex rlock-AF_NETLINK irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem key#3 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock key#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#7 key#10 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock key#10 irq_context: 0 &sb->s_type->i_mutex_key#8 lock#4 &lruvec->lru_lock irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &memcg->move_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &xa->xa_lock#7 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex sk_lock-AF_INET6 irq_context: 0 rtnl_mutex sk_lock-AF_INET6 slock-AF_INET6 irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &mm->mmap_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 fs_reclaim irq_context: 0 rtnl_mutex sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex sk_lock-AF_INET6 pool_lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_INET6 rcu_read_lock stock_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_INET6 rcu_read_lock &dir->lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &obj_hash[i].lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock irq_context: 0 rtnl_mutex slock-AF_INET6 irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &c->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &rq->__lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock fs_reclaim irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock &c->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &____s->seqcount#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock 
stock_lock irq_context: 0 ppp_mutex &____s->seqcount#2 irq_context: 0 ppp_mutex &____s->seqcount irq_context: 0 ppp_mutex rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 ppp_mutex rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle lock#4 &lruvec->lru_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) key#15 irq_context: 0 sb_writers#4 &dentry->d_lock irq_context: 0 sb_writers#4 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sem->wait_lock irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_INET6 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_INET6 slock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock krc.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_INET6 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_INET6 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_INET6 krc.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex slock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_IPGRE irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 integrity_iint_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex irq_context: 0 
sb_writers#4 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex fs_reclaim irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &p->alloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &list->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 kauditd_wait.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 kauditd_wait.lock &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 kauditd_wait.lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 kauditd_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &iint->mutex irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem lock#4 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem lock#5 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem ptlock_ptr(page)#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem ptlock_ptr(page)#2 &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem ptlock_ptr(page)#2 &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem ptlock_ptr(page)#2 &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem ptlock_ptr(page)#2 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem ptlock_ptr(page)#2 key irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sb->s_type->i_lock_key#22 &xa->xa_lock#7 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock lock#4 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock lock#5 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &lruvec->lru_lock irq_context: 0 sb_writers#4 
&sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->private_lock rcu_read_lock rcu_read_lock key#10 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sb->s_type->i_lock_key#22 &xa->xa_lock#7 &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sb->s_type->i_lock_key#22 &xa->xa_lock#7 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &fsnotify_mark_srcu &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &fn->fou_lock irq_context: 0 &mm->mmap_lock sb_writers#4 &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 ptlock_ptr(page)#2 ptlock_ptr(page)#2/1 rcu_read_lock &p->pi_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 ptlock_ptr(page)#2 ptlock_ptr(page)#2/1 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 ptlock_ptr(page)#2 ptlock_ptr(page)#2/1 rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) key#15 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex kernfs_idr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex kernfs_idr_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->sync_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem hwsim_radio_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem hwsim_netgroup_ida.xa_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rdma_nets_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rdma_nets_rwsem rdma_nets.xa_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem devices_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rhashtable_bucket 
irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nl_table_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nl_table_wait.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-clock-AF_NETLINK irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &nlk->wait irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem wlock-AF_NETLINK irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dir->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rdma_nets.xa_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &dentry->d_lock &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &dentry->d_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &dentry->d_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_LOOPBACK irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_INET6 rcu_read_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 kn->active#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) vmap_area_lock irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) purge_vmap_area_lock irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) purge_vmap_area_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) rlock-AF_NETLINK irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) &dir->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &hn->hn_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &this->info_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pnettable->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pnetids_ndev->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem 
k-sk_lock-AF_INET6/1 k-slock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6/1 rlock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6/1 &list->lock#24 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->sctp.addr_wq_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 &sctp_ep_hashtable[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 k-clock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->sctp.addr_wq_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-slock-AF_INET6 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-slock-AF_INET6 pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-slock-AF_INET6 elock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex &x->wait#24 irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &c->lock irq_context: softirq &(&bat_priv->dat.work)->timer irq_context: softirq &(&bat_priv->dat.work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&bat_priv->dat.work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&bat_priv->dat.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&bat_priv->dat.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&bat_priv->dat.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq 
&(&bat_priv->bla.work)->timer irq_context: softirq &(&bat_priv->bla.work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&bat_priv->bla.work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&bat_priv->bla.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh pool_lock#2 irq_context: 0 sb_writers#3 oom_adj_mutex &rq->__lock irq_context: 0 sb_writers#3 oom_adj_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &c->lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &____s->seqcount irq_context: 0 rtnl_mutex kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 &p->lock remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex sk_lock-AF_INET6 krc.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &hash->list_locks[i] irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &base->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &base->lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) key#20 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &entry->crc_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) &base->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx quarantine_lock irq_context: 0 &type->i_mutex_dir_key#3 remove_cache_srcu irq_context: 0 &type->i_mutex_dir_key#3 remove_cache_srcu quarantine_lock irq_context: 0 &type->i_mutex_dir_key#3 remove_cache_srcu &c->lock irq_context: 0 &type->i_mutex_dir_key#3 remove_cache_srcu &n->list_lock irq_context: 0 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &c->lock irq_context: 0 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work pool_lock irq_context: 0 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->softif_vlan_list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &c->lock irq_context: 0 (wq_completion)mld 
(work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&port->bc_work) rcu_read_lock &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET k-clock-AF_INET irq_context: 0 pernet_ops_rwsem k-slock-AF_INET irq_context: 0 pernet_ops_rwsem k-slock-AF_INET &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-slock-AF_INET pool_lock#2 irq_context: 0 pernet_ops_rwsem k-slock-AF_INET elock-AF_INET irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem key#3 irq_context: 0 sb_writers#4 sb_internal jbd2_handle tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &xa->xa_lock#7 stock_lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); 
(void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET k-clock-AF_INET irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-slock-AF_INET irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-slock-AF_INET &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-slock-AF_INET pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-slock-AF_INET elock-AF_INET irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u32.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock &meta->lock irq_context: softirq rcu_read_lock hwsim_radio_lock &____s->seqcount#2 irq_context: softirq rcu_read_lock hwsim_radio_lock &____s->seqcount irq_context: softirq 
(&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->softif_vlan_list_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->softif_vlan_list_lock &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 pernet_ops_rwsem &sn->gssp_lock irq_context: 0 pernet_ops_rwsem &cd->hash_lock irq_context: 0 pernet_ops_rwsem cache_list_lock &cd->hash_lock irq_context: 0 pernet_ops_rwsem (&net->can.stattimer) irq_context: 0 pernet_ops_rwsem xfrm_state_gc_work irq_context: 0 pernet_ops_rwsem &net->xfrm.xfrm_state_lock irq_context: 0 pernet_ops_rwsem (work_completion)(&ht->run_work) irq_context: 0 pernet_ops_rwsem &ht->mutex irq_context: 0 pernet_ops_rwsem &ht->mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &ht->mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 pernet_ops_rwsem (work_completion)(&(&net->ipv6.addr_chk_work)->work) irq_context: 0 pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock pool_lock#2 irq_context: 0 pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock krc.lock irq_context: 0 pernet_ops_rwsem ip6_fl_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &net->rules_mod_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex krc.lock irq_context: 0 pernet_ops_rwsem (&net->ipv6.ip6_fib_timer) irq_context: 0 pernet_ops_rwsem rtnl_mutex (&mrt->ipmr_expire_timer) irq_context: 0 pernet_ops_rwsem rtnl_mutex &base->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex (work_completion)(&ht->run_work) irq_context: 0 pernet_ops_rwsem rtnl_mutex &ht->mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex &ht->mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &ht->mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem rcu_read_lock 
rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-clock-AF_NETLINK irq_context: 0 pernet_ops_rwsem &nlk->wait irq_context: 0 pernet_ops_rwsem wlock-AF_NETLINK irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem __ip_vs_mutex irq_context: 0 pernet_ops_rwsem (&ipvs->dest_trash_timer) irq_context: 0 pernet_ops_rwsem (work_completion)(&(&ipvs->expire_nodest_conn_work)->work) irq_context: 0 pernet_ops_rwsem (work_completion)(&(&ipvs->defense_work)->work) irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pcpu_lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem (work_completion)(&(&ipvs->est_reload_work)->work) irq_context: 0 pernet_ops_rwsem nfnl_subsys_ipset irq_context: 0 pernet_ops_rwsem recent_lock irq_context: 0 pernet_ops_rwsem hashlimit_mutex irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 pernet_ops_rwsem (work_completion)(&(&cnet->ecache.dwork)->work) irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex nf_connlabels_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &ht->lock &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem nf_hook_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem sysfs_symlink_target_lock irq_context: 0 pernet_ops_rwsem kernfs_idr_lock irq_context: 0 pernet_ops_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem &meta->lock irq_context: 0 pernet_ops_rwsem tcp_metrics_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock &meta->lock irq_context: 0 pernet_ops_rwsem k-clock-AF_INET irq_context: 0 pernet_ops_rwsem (work_completion)(&net->xfrm.policy_hash_work) irq_context: 0 pernet_ops_rwsem &net->xfrm.xfrm_policy_lock irq_context: 0 pernet_ops_rwsem (work_completion)(&net->xfrm.state_hash_work) irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex remove_cache_srcu irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) 
&ht->mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &list->lock#2 irq_context: 0 pernet_ops_rwsem genl_sk_destructing_waitq.lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex rcu_state.barrier_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&bat_priv->orig_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 &ret->b_state_lock &journal->j_list_lock rcu_read_lock &xa->xa_lock#7 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex &x->wait#24 irq_context: 0 &dir->lock &obj_hash[i].lock irq_context: 0 &dir->lock pool_lock#2 irq_context: 0 (wq_completion)events free_ipc_work &rnp->exp_lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 tasklist_lock stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &journal->j_state_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &journal->j_state_lock &base->lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 misc_mtx &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 
(wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &group->mark_mutex remove_cache_srcu irq_context: 0 &group->mark_mutex remove_cache_srcu quarantine_lock irq_context: 0 &group->mark_mutex remove_cache_srcu &c->lock irq_context: 0 &group->mark_mutex remove_cache_srcu &n->list_lock irq_context: 0 &group->mark_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &group->mark_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 rtnl_mutex qdisc_mod_lock irq_context: 0 rtnl_mutex &block->lock irq_context: 0 rtnl_mutex &block->cb_lock irq_context: 0 rtnl_mutex &block->cb_lock flow_indr_block_lock irq_context: 0 rtnl_mutex &block->cb_lock flow_indr_block_lock fs_reclaim irq_context: 0 rtnl_mutex &block->cb_lock flow_indr_block_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &block->cb_lock flow_indr_block_lock &c->lock irq_context: 0 rtnl_mutex &block->cb_lock flow_indr_block_lock pool_lock#2 irq_context: 0 rtnl_mutex &dev->tx_global_lock _xmit_ETHER#2 irq_context: 0 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 namespace_sem remove_cache_srcu irq_context: 0 namespace_sem remove_cache_srcu quarantine_lock irq_context: 0 namespace_sem remove_cache_srcu &c->lock irq_context: 0 namespace_sem remove_cache_srcu &n->list_lock irq_context: 0 namespace_sem remove_cache_srcu &rq->__lock irq_context: 0 namespace_sem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 namespace_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 namespace_sem remove_cache_srcu &obj_hash[i].lock irq_context: 0 namespace_sem pcpu_alloc_mutex &rq->__lock irq_context: 0 namespace_sem pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &____s->seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback stock_lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 
(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &disk->open_mutex &rq->__lock irq_context: 0 &disk->open_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &sn->gssp_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &cd->hash_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem cache_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem cache_list_lock &cd->hash_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &____s->seqcount irq_context: 0 
(wq_completion)netns net_cleanup_work pernet_ops_rwsem (&net->can.stattimer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem xfrm_state_gc_work irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->xfrm.xfrm_state_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &hashinfo->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-clock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nl_table_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&net->ipv6.addr_chk_work)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ip6_fl_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &net->rules_mod_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem percpu_counters_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (&net->ipv6.ip6_fib_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (&mrt->ipmr_expire_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (work_completion)(&ht->run_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ht->mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ht->mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ht->mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysctl_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem __ip_vs_app_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem __ip_vs_app_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem __ip_vs_app_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem __ip_vs_app_mutex quarantine_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem __ip_vs_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (&ipvs->dest_trash_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&ipvs->expire_nodest_conn_work)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&ipvs->defense_work)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->est_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->est_mutex pcpu_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->est_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->est_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&ipvs->est_reload_work)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nfnl_subsys_ipset irq_context: 0 
(wq_completion)netns net_cleanup_work pernet_ops_rwsem recent_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem hashlimit_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_log_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_log_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&cnet->ecache.dwork)->work) irq_context: 0 sk_lock-AF_KCM &pcp->lock &zone->lock irq_context: 0 sk_lock-AF_KCM &pcp->lock &zone->lock &____s->seqcount irq_context: 0 namespace_sem remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 namespace_sem remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 namespace_sem remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#23/1 &xa->xa_lock#3 &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#7 &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#7 &n->list_lock &c->lock irq_context: 0 sk_lock-AF_KCM &c->lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq fs/notify/mark.c:89 rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 
&sb->s_type->i_mutex_key#8 jbd2_handle rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex nf_connlabels_lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &k->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_hook_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_hook_mutex cpu_hotplug_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &sn->pipefs_sb_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex fs_reclaim irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex nl_table_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex nl_table_wait.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysfs_symlink_target_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem kernfs_idr_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem tcp_metrics_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &ei->i_es_lock key#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 
0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem key#15 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &sbi->s_md_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem key#3 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle 
&ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &xa->xa_lock#7 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem lock#4 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &journal->j_revoke_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock &list->lock#5 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 pernet_ops_rwsem rtnl_mutex bpf_devs_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &in_dev->mc_tomb_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock krc.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex class irq_context: 0 pernet_ops_rwsem rtnl_mutex (&tbl->proxy_timer) irq_context: 0 pernet_ops_rwsem rtnl_mutex &ul->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &net->xdp.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex mirred_list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &nft_net->commit_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &ul->lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &ent->pde_unload_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &net->ipv6.addrconf_hash_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &ndev->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &ndev->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_query_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_query_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex (work_completion)(&(&idev->mc_report_work)->work) irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_report_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock krc.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &pnn->pndevs.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &pnn->routes.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex target_list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex _xmit_TUNNEL6 irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex kernfs_idr_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex 
&root->kernfs_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 pernet_ops_rwsem rtnl_mutex subsys mutex#17 &k->k_lock klist_remove_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex deferred_probe_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex device_links_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock xps_map_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &____s->seqcount#2 irq_context: 0 namespace_sem &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem dev_base_lock irq_context: 0 pernet_ops_rwsem lweventlist_lock irq_context: 0 pernet_ops_rwsem napi_hash_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-clock-AF_INET irq_context: 0 pernet_ops_rwsem &dir->lock#2 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &dir->lock#2 pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&net->xfrm.policy_hash_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->xfrm.xfrm_policy_lock irq_context: 0 pernet_ops_rwsem netdev_unregistering_wq.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&net->xfrm.state_hash_work) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_es_lock key#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_node_0 irq_context: 0 &type->s_umount_key#23/1 list_lrus_mutex &rq->__lock irq_context: 0 &type->s_umount_key#23/1 list_lrus_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &xa->xa_lock#7 stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &xa->xa_lock#7 &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &xa->xa_lock#7 &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &xa->xa_lock#7 &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &xa->xa_lock#7 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &xa->xa_lock#7 &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &xa->xa_lock#7 pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex _xmit_SIT irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &list->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem genl_sk_destructing_waitq.lock irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex rcu_state.barrier_mutex.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex &rq->__lock 
irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 tomoyo_ss &rq->__lock irq_context: 0 sb_writers#4 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#23/1 &n->list_lock irq_context: 0 &type->s_umount_key#23/1 &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &pa->pa_lock#2 irq_context: 0 sb_writers#4 tomoyo_ss &n->list_lock irq_context: 0 sb_writers#4 tomoyo_ss &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &bgl->locks[i].lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem key#15 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &sbi->s_md_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 
jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem key#3 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &q->queue_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &q->queue_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &q->queue_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &q->queue_lock pcpu_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &q->queue_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &q->queue_lock percpu_counters_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &q->queue_lock &blkcg->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &q->queue_lock &blkcg->lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex &x->wait#24 irq_context: 0 &mm->mmap_lock &xa->xa_lock#3 irq_context: 0 &mm->mmap_lock &xa->xa_lock#3 pool_lock#2 irq_context: 0 &mm->mmap_lock &sb->s_type->i_lock_key irq_context: 0 &mm->mmap_lock &s->s_inode_list_lock irq_context: 0 &mm->mmap_lock batched_entropy_u32.lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_lock_key &dentry->d_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &dd->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &dd->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 
&sb->s_type->i_mutex_key#8 &mm->mmap_lock ptlock_ptr(page) irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &fq->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->active_txq_lock[i] irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->active_txq_lock[i] irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->queue_stop_reason_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &list->lock#19 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &list->lock#19 irq_context: 0 sb_writers &type->i_mutex_dir_key#2 fs_reclaim irq_context: 0 sb_writers &type->i_mutex_dir_key#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &xa->xa_lock#3 irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &rq->__lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work &dir->lock irq_context: 0 (wq_completion)netns net_cleanup_work &dir->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work &dir->lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work stock_lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &obj_hash[i].lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 stock_lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &dentry->d_lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key#2 tomoyo_ss irq_context: 0 sb_writers &type->i_mutex_dir_key#2 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &type->i_mutex_dir_key#2 tomoyo_ss &c->lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key#2 tomoyo_ss rcu_read_lock 
rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key#2 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &dentry->d_lock &wq#2 irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &sbinfo->stat_lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &xa->xa_lock#3 &____s->seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &xa->xa_lock#3 pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &xa->xa_lock#3 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &xa->xa_lock#3 &obj_hash[i].lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &sb->s_type->i_lock_key#5 irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &s->s_inode_list_lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 tk_core.seq.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key#2 batched_entropy_u32.lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &sb->s_type->i_lock_key#5 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#4 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 fs_reclaim irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &____s->seqcount irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 pool_lock#2 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 stock_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &xa->xa_lock#7 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 lock#4 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 lock#4 &lruvec->lru_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &info->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &xa->xa_lock#7 stock_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &xa->xa_lock#7 pool_lock#2 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 key#9 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &rq->__lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &pcp->lock &zone->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &mm->mmap_lock sb_pagefaults &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock &obj_hash[i].lock pool_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock 
sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &wb->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 fs_reclaim &rq->__lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock batched_entropy_u8.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock kfence_freelist_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 pernet_ops_rwsem &ht->mutex rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 &mm->mmap_lock sb_pagefaults &c->lock irq_context: 0 &mm->mmap_lock sb_pagefaults &____s->seqcount#2 irq_context: 0 &mm->mmap_lock sb_pagefaults &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &pa->pa_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &lg->lg_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock pool_lock#2 irq_context: 0 &ei->i_data_sem irq_context: 0 &ei->i_data_sem &sem->wait_lock irq_context: 0 &ei->i_data_sem &rq->__lock irq_context: 0 &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sem->wait_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &ei->i_data_sem pool_lock#2 irq_context: 0 &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_wait_commit irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_wait_done_commit irq_context: 0 &xa->xa_lock#7 &c->lock irq_context: 0 &xa->xa_lock#7 &____s->seqcount#2 irq_context: 0 &xa->xa_lock#7 &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &dd->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &pool->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock &dd->lock irq_context: 0 &mm->mmap_lock sb_pagefaults &journal->j_state_lock irq_context: 0 &mm->mmap_lock sb_pagefaults &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 &mm->mmap_lock sb_pagefaults &journal->j_state_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_pagefaults &journal->j_state_lock &base->lock irq_context: 0 &mm->mmap_lock sb_pagefaults &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &ret->b_state_lock irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &xa->xa_lock#7 &c->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &xa->xa_lock#7 
&n->list_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &xa->xa_lock#7 &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &ei->i_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &pa->pa_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex _xmit_TUNNEL irq_context: 0 pernet_ops_rwsem rtnl_mutex kernfs_idr_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex kernfs_idr_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 stock_lock irq_context: 0 sk_lock-AF_INET6 &n->list_lock irq_context: 0 sk_lock-AF_INET6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 (kmod_concurrent_max).lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &n->list_lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock &pool->lock/1 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &x->wait#17 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock &meta->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock kfence_freelist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->poll_wait/1 &p->pi_lock irq_context: 0 &ep->mtx rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &journal->j_state_lock irq_context: 0 sk_lock-AF_INET6 slock-AF_INET6 &sk->sk_lock.wq irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &base->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 (&timer.timer) irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock 
irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_wait_commit irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_wait_done_commit irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex fs_reclaim &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &rq->__lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &base->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &ret->b_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &ret->b_state_lock &journal->j_list_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &ul->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh &sch->q.lock irq_context: 0 (wq_completion)events_power_efficient (gc_work).work rcu_node_0 irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &rcu_state.expedited_wq irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &mapping->private_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &wb->list_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &wb->list_lock &sb->s_type->i_lock_key#5 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 
running_helpers_waitq.lock irq_context: 0 slock-AF_INET6 &sk->sk_lock.wq irq_context: 0 slock-AF_INET6 &sk->sk_lock.wq &p->pi_lock irq_context: 0 slock-AF_INET6 &sk->sk_lock.wq &p->pi_lock &rq->__lock irq_context: 0 slock-AF_INET6 &sk->sk_lock.wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) pool_lock#2 irq_context: 0 &ep->mtx &rcu_state.expedited_wq irq_context: 0 &ep->mtx &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &ep->mtx &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &ep->mtx &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER krc.lock irq_context: 0 &ep->mtx &root->kernfs_rwsem rcu_read_lock rcu_node_0 irq_context: 0 &ep->mtx &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex _xmit_ETHER irq_context: 0 &ep->mtx &sem->wait_lock irq_context: 0 &ep->mtx &p->pi_lock irq_context: 0 &ep->mtx &p->pi_lock &rq->__lock irq_context: 0 &ep->mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex rcu_node_0 irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex &rcu_state.expedited_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq (&peer->timer_retransmit_handshake) irq_context: softirq (&peer->timer_retransmit_handshake) &peer->endpoint_lock irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock rcu_read_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = 
(typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex fs_reclaim &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : 
"=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 slock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rlock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &list->lock#24 irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 &sctp_ep_hashtable[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 clock-AF_INET irq_context: 0 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ei->i_data_sem &rq->__lock irq_context: 0 &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events free_ipc_work sysctl_lock &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)afs (work_completion)(&net->cells_manager) bit_wait_table + i irq_context: 0 (wq_completion)afs (work_completion)(&net->cells_manager) bit_wait_table + i &p->pi_lock irq_context: 0 (wq_completion)afs (work_completion)(&net->cells_manager) bit_wait_table + i &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)afs (work_completion)(&net->cells_manager) bit_wait_table + i &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 pernet_ops_rwsem rtnl_mutex _xmit_IPGRE irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &rnp->exp_wq[0] irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 &info->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock &ei->i_raw_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fs_reclaim &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &mapping->private_lock irq_context: 0 tracepoints_mutex &c->lock irq_context: 0 tracepoints_mutex &n->list_lock irq_context: 0 tracepoints_mutex &n->list_lock &c->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 &ei->i_data_sem &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 &ei->i_data_sem &mapping->private_lock irq_context: 0 &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &ei->i_data_sem pool_lock#2 irq_context: 0 &dir->lock#2 &meta->lock irq_context: 0 &dir->lock#2 kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) wakeup_srcu_srcu_usage.lock &ACCESS_PRIVATE(sdp, lock) irq_context: 0 uevent_sock_mutex &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &fn->fou_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) 
&rcu_state.expedited_wq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &mapping->private_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 &xt[i].mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &xt[i].mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &sem->wait_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 tomoyo_ss quarantine_lock irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu &c->lock irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem ipvs->sync_mutex irq_context: 0 pernet_ops_rwsem hwsim_radio_lock irq_context: 0 pernet_ops_rwsem rdma_nets_rwsem irq_context: 0 pernet_ops_rwsem rdma_nets_rwsem rdma_nets.xa_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex _xmit_LOOPBACK irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem 
remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock krc.lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem krc.lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &meta->lock irq_context: softirq &(&nsim_dev->trap_data->trap_report_dw)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock quarantine_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &n->lock &____s->seqcount#9 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock nl_table_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock nl_table_wait.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock lock#8 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock id_table_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &dir->lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock krc.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &c->lock 
irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &n->list_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &n->list_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &n->lock irq_context: softirq rcu_callback &x->wait#24 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&peer->timer_persistent_keepalive) batched_entropy_u8.lock irq_context: softirq (&peer->timer_persistent_keepalive) kfence_freelist_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock 
irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 pernet_ops_rwsem remove_cache_srcu &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex krc.lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex krc.lock &base->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex krc.lock &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 namespace_sem namespace_sem.wait_lock irq_context: 0 namespace_sem.wait_lock irq_context: 0 pid_caches_mutex slab_mutex &rq->__lock irq_context: 0 pid_caches_mutex slab_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#23/1 rcu_read_lock rcu_node_0 irq_context: 0 &type->s_umount_key#23/1 rcu_read_lock &rq->__lock irq_context: 0 nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 namespace_sem fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 namespace_sem fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nl_table_wait.lock &p->pi_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pid_caches_mutex slab_mutex &____s->seqcount#2 irq_context: 0 pid_caches_mutex slab_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem lock kernfs_idr_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex.wait_lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex &p->pi_lock irq_context: 0 lock prog_idr_lock &c->lock irq_context: 0 sb_writers &dentry->d_lock irq_context: 0 sb_writers tomoyo_ss irq_context: 0 sb_writers tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers tomoyo_ss &c->lock irq_context: 0 sb_writers tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &sb->s_type->i_lock_key#5 &xa->xa_lock#7 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 lock#5 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &lruvec->lru_lock irq_context: 0 sb_writers 
&sb->s_type->i_mutex_key#4 &obj_hash[i].lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &sb->s_type->i_lock_key#5 &xa->xa_lock#7 &obj_hash[i].lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &sb->s_type->i_lock_key#5 &xa->xa_lock#7 pool_lock#2 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_node_0 irq_context: 0 pernet_ops_rwsem uevent_sock_mutex uevent_sock_mutex.wait_lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &sem->wait_lock irq_context: 0 pernet_ops_rwsem nf_hook_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem nf_hook_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &xa->xa_lock#7 &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex batched_entropy_u8.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex kfence_freelist_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &meta->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &ei->i_es_lock key#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &iint->mutex ima_extend_list_mutex &n->list_lock irq_context: 0 &iint->mutex ima_extend_list_mutex &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem bit_wait_table + i irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex &rq->__lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 tomoyo_ss &____s->seqcount#2 irq_context: 0 sb_writers#4 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 key#3 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 key#15 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &sem->wait_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 
rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx lock kernfs_idr_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex uevent_sock_mutex.wait_lock irq_context: 0 &ep->mtx uevent_sock_mutex.wait_lock irq_context: 0 &ep->mtx &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &ep->mtx &____s->seqcount#2 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &sb->s_type->i_lock_key#5 &xa->xa_lock#7 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &info->lock key#9 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &q->queue_lock &obj_hash[i].lock pool_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &q->queue_lock &blkcg->lock &c->lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 sk_lock-AF_INET rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem uevent_sock_mutex &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &n->list_lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem 
fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu quarantine_lock irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu &c->lock irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu &n->list_lock irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 fill_pool_map-wait-type-override &c->lock irq_context: 0 
(wq_completion)events free_ipc_work &type->s_umount_key#47 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 fill_pool_map-wait-type-override pool_lock irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &mapping->private_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex net_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &fsnotify_mark_srcu &group->notification_waitq &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 &type->s_umount_key#23/1 pcpu_alloc_mutex &rq->__lock irq_context: 0 &type->s_umount_key#23/1 pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers tomoyo_ss &rq->__lock irq_context: 0 sb_writers tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC 
&rxnet->local_mutex &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &ei->socket.wq.wait irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &ei->socket.wq.wait &ep->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &u->lock &ei->socket.wq.wait &ep->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 &ei->i_data_sem &c->lock irq_context: 0 &ei->i_data_sem &n->list_lock irq_context: 0 &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 tracepoints_mutex cpu_hotplug_lock &rq->__lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock stock_lock irq_context: 0 rcu_read_lock &____s->seqcount#7 irq_context: 0 rcu_read_lock &nf_nat_locks[i] irq_context: 0 rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 rcu_read_lock &dir->lock#2 irq_context: 0 rcu_read_lock &tbl->lock irq_context: 0 rcu_read_lock &n->lock pool_lock#2 irq_context: 0 rcu_read_lock stock_lock irq_context: 0 rcu_read_lock &ul->lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex sysctl_lock krc.lock &obj_hash[i].lock 
irq_context: 0 rtnl_mutex sysctl_lock krc.lock &base->lock irq_context: 0 rtnl_mutex sysctl_lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 lock#5 &lruvec->lru_lock irq_context: 0 pernet_ops_rwsem krc.lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem krc.lock &base->lock irq_context: 0 pernet_ops_rwsem krc.lock &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex remove_cache_srcu irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex remove_cache_srcu quarantine_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex remove_cache_srcu &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex remove_cache_srcu &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex remove_cache_srcu &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &hashinfo->ehash_locks[i] irq_context: 0 &mm->mmap_lock lock#5 &lruvec->lru_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock tcp_metrics_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock tcp_metrics_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 tk_core.seq.seqcount irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 cb_lock genl_mutex (console_sem).lock irq_context: 0 cb_lock genl_mutex console_lock console_srcu console_owner_lock irq_context: 0 cb_lock genl_mutex console_lock console_srcu console_owner irq_context: 0 cb_lock genl_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 cb_lock genl_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 sb_writers#4 
&type->i_mutex_dir_key#3/1 sb_internal &c->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock batched_entropy_u8.lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock kfence_freelist_lock irq_context: 0 rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &xa->xa_lock#7 &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &rnp->exp_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex.wait_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &vma->vm_lock->lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &vma->vm_lock->lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock &sem->wait_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events 
(work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &vma->vm_lock->lock mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem quarantine_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &____s->seqcount#2 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &____s->seqcount#2 irq_context: softirq rcu_callback per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 &mm->mmap_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 pernet_ops_rwsem remove_cache_srcu pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock 
mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &ht->mutex &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem key#3 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_raw_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_wait_updates irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &x->wait#17 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &cfs_rq->removed.lock irq_context: 0 slock-AF_INET6 &sk->sk_lock.wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 slock-AF_INET6 &sk->sk_lock.wq &p->pi_lock &rq->__lock 
&cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &meta->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem kfence_freelist_lock irq_context: 0 &ep->mtx remove_cache_srcu &c->lock irq_context: 0 &ep->mtx remove_cache_srcu &n->list_lock irq_context: 0 &ep->mtx remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &ep->mtx remove_cache_srcu &obj_hash[i].lock irq_context: 0 &ep->mtx remove_cache_srcu pool_lock#2 irq_context: 0 &ep->mtx remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &ep->mtx remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &ep->mtx remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 lock#5 irq_context: 0 &mm->mmap_lock lock#5 &lruvec->lru_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &tsk->futex_exit_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &rnp->exp_wq[2] irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem quarantine_lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); 
(typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events free_ipc_work sb_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex &____s->seqcount#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rnp->exp_wq[1] irq_context: 0 pernet_ops_rwsem rtnl_mutex &rnp->exp_wq[1] irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &meta->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kfence_freelist_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wq->mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wq->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rnp->exp_wq[2] irq_context: softirq (&peer->timer_send_keepalive) irq_context: softirq (&peer->timer_send_keepalive) pool_lock#2 irq_context: softirq (&peer->timer_send_keepalive) &c->lock irq_context: softirq (&peer->timer_send_keepalive) &list->lock#17 irq_context: softirq (&peer->timer_send_keepalive) tk_core.seq.seqcount irq_context: softirq (&peer->timer_send_keepalive) rcu_read_lock_bh &r->producer_lock#2 irq_context: softirq (&peer->timer_send_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: softirq (&peer->timer_send_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&peer->timer_send_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq (&peer->timer_send_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&peer->timer_send_keepalive) rcu_read_lock_bh 
rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&net->ipv6.addr_chk_work)->timer irq_context: softirq &(&net->ipv6.addr_chk_work)->timer rcu_read_lock &pool->lock irq_context: softirq (&peer->timer_send_keepalive) rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq &(&net->ipv6.addr_chk_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&peer->timer_send_keepalive) rcu_read_lock_bh &base->lock irq_context: softirq &(&net->ipv6.addr_chk_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq (&peer->timer_send_keepalive) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 namespace_sem &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)bat_events &rq->__lock irq_context: 0 namespace_sem &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)bat_events &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &hashinfo->ehash_locks[i] irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_lock_key &xa->xa_lock#7 irq_context: 0 &lruvec->lru_lock irq_context: 0 &sb->s_type->i_lock_key &xa->xa_lock#7 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_lock_key &xa->xa_lock#7 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#7 
batched_entropy_u8.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#7 kfence_freelist_lock irq_context: softirq (&peer->timer_persistent_keepalive) &____s->seqcount#2 irq_context: softirq (&peer->timer_persistent_keepalive) &____s->seqcount irq_context: 0 sb_writers#4 &journal->j_list_lock irq_context: 0 &p->lock remove_cache_srcu pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: softirq (&n->timer) &n->lock &c->lock irq_context: softirq (&n->timer) &n->lock pool_lock#2 irq_context: softirq (&n->timer) pool_lock#2 irq_context: softirq (&n->timer) &dir->lock#2 irq_context: softirq (&n->timer) &c->lock irq_context: softirq (&n->timer) &ul->lock#2 irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: softirq (&n->timer) &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss mount_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss mount_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem 
rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pcpu_alloc_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 &mm->mmap_lock &xa->xa_lock#7 batched_entropy_u8.lock irq_context: 0 &mm->mmap_lock &xa->xa_lock#7 kfence_freelist_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex bpf_devs_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex bpf_devs_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &____s->seqcount irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex cpu_hotplug_lock &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dentry->d_lock &sb->s_type->i_lock_key#24 &dentry->d_lock &lru->node[i].lock irq_context: 0 &ep->mtx uevent_sock_mutex &____s->seqcount irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock pool_lock#2 irq_context: 0 uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&hsr->announce_timer) rcu_read_lock &____s->seqcount#2 irq_context: softirq (&hsr->announce_timer) rcu_read_lock &____s->seqcount irq_context: 0 uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &rnp->exp_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rnp->exp_wq[1] 
irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock krc.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock krc.lock &base->lock irq_context: 0 rtnl_mutex &idev->mc_lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex remove_cache_srcu pool_lock#2 irq_context: 0 rtnl_mutex sysctl_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex quarantine_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)events free_ipc_work &rnp->exp_wq[2] irq_context: 0 (wq_completion)events free_ipc_work &pool->lock irq_context: 0 (wq_completion)events free_ipc_work &rq->__lock irq_context: 0 (wq_completion)events free_ipc_work &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events free_ipc_work &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_lock_key#27 irq_context: 0 &sb->s_type->i_mutex_key#18 irq_context: 0 &sb->s_type->i_mutex_key#18 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#18 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#18 sb_writers#13 mount_lock irq_context: 0 &sb->s_type->i_mutex_key#18 sb_writers#13 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#18 sb_writers#13 &sb->s_type->i_lock_key#27 irq_context: 0 &sb->s_type->i_mutex_key#18 sb_writers#13 &wb->list_lock irq_context: 0 &sb->s_type->i_mutex_key#18 sb_writers#13 &wb->list_lock &sb->s_type->i_lock_key#27 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex 
&sem->wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &sem->wait_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &sem->wait_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &p->pi_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock krc.lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock krc.lock &base->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 namespace_sem rcu_node_0 irq_context: 0 namespace_sem rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback &meta->lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback kfence_freelist_lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock &xa->xa_lock#7 &pcp->lock &zone->lock irq_context: 0 &mm->mmap_lock &xa->xa_lock#7 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem quarantine_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: softirq (&ndev->rs_timer) rcu_read_lock 
rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock pool_lock#2 irq_context: 0 cpuset_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &n->lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock tcp_metrics_lock &c->lock irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 cb_lock remove_cache_srcu pool_lock#2 irq_context: 0 delayed_uprobe_lock &rq->__lock irq_context: softirq (&n->timer) rcu_read_lock &ndev->lock irq_context: softirq (&n->timer) icmp_global.lock irq_context: softirq (&n->timer) icmp_global.lock batched_entropy_u8.lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock pool_lock#2 
irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &c->lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &n->list_lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock &c->lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq (&n->timer) rcu_read_lock id_table_lock irq_context: softirq (&n->timer) &n->lock irq_context: softirq (&n->timer) nl_table_lock irq_context: softirq (&n->timer) nl_table_wait.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &dir->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &dir->lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock rlock-AF_PACKET irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: softirq (&p->forward_delay_timer) irq_context: softirq (&p->forward_delay_timer) &br->lock irq_context: softirq (&p->forward_delay_timer) &br->lock rcu_read_lock pool_lock#2 irq_context: softirq (&p->forward_delay_timer) &br->lock rcu_read_lock nl_table_lock irq_context: softirq (&p->forward_delay_timer) &br->lock rcu_read_lock &obj_hash[i].lock irq_context: softirq (&p->forward_delay_timer) &br->lock rcu_read_lock nl_table_wait.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &obj_hash[i].lock pool_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: softirq rcu_read_lock rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 tomoyo_ss remove_cache_srcu pool_lock#2 irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 &mm->mmap_lock &mm->page_table_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &mm->page_table_lock pool_lock#2 irq_context: 0 &mm->mmap_lock &mm->page_table_lock stock_lock irq_context: 0 &mm->mmap_lock &mm->page_table_lock rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem &rnp->exp_lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)events free_ipc_work &p->pi_lock irq_context: 0 (wq_completion)events free_ipc_work &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events free_ipc_work &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&p->forward_delay_timer) &br->lock 
rcu_read_lock &c->lock irq_context: softirq (&p->forward_delay_timer) &br->lock rcu_read_lock &n->list_lock irq_context: softirq (&p->forward_delay_timer) &br->lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem sysctl_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem sysctl_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem sysctl_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem sysctl_lock fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem sysctl_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 pernet_ops_rwsem sysctl_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 &mm->mmap_lock remove_cache_srcu pool_lock#2 irq_context: 0 sk_lock-AF_INET remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &meta->lock irq_context: 0 &mm->mmap_lock stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 lock pidmap_lock rcu_read_lock pool_lock#2 irq_context: 0 lock pidmap_lock &obj_hash[i].lock irq_context: 0 rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 rcu_state.exp_mutex pool_lock#2 irq_context: 0 (wq_completion)events free_ipc_work &xa->xa_lock#3 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 lock#4 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 lock#4 &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu irq_context: 0 sb_writers#4 
&type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &obj_hash[i].lock irq_context: softirq (&n->timer) &n->lock &____s->seqcount#2 irq_context: softirq (&n->timer) &n->lock &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: softirq &(&bat_priv->bla.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: softirq &(&bat_priv->bla.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&bat_priv->bla.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem kernfs_idr_lock &obj_hash[i].lock 
irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem batched_entropy_u8.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysctl_lock krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysctl_lock krc.lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysctl_lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)afs (work_completion)(&net->cells_manager) &rq->__lock irq_context: 0 (wq_completion)afs (work_completion)(&net->fs_manager) &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex kfence_freelist_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[1] &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &rnp->exp_wq[3] irq_context: 0 (wq_completion)events 
(work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rnp->exp_wq[3] irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem kernfs_idr_lock pool_lock#2 irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock fs_reclaim irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle 
&journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle bit_wait_table + i irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &xa->xa_lock#7 batched_entropy_u8.lock irq_context: 0 &xa->xa_lock#7 kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu pool_lock#2 irq_context: 0 pernet_ops_rwsem nl_table_wait.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pcpu_alloc_mutex &rq->__lock irq_context: 0 &type->s_umount_key#23/1 pcpu_alloc_mutex.wait_lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#23/1 &p->pi_lock irq_context: 0 &type->s_umount_key#23/1 &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#23/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 namespace_sem pcpu_alloc_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock jump_label_mutex text_mutex &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#32 rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#32 rcu_read_lock &wb->work_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#32 rcu_read_lock &wb->work_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#32 rcu_read_lock &wb->work_lock &base->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#32 rcu_read_lock &wb->work_lock &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#32 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#32 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#32 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 
&sb->s_type->i_mutex_key#8 &type->s_umount_key#32 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#32 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#32 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sb->s_type->i_lock_key#22 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#32 &bdi->wb_waitq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#32 &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#32 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &xa->xa_lock#7 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem lock#4 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem lock#4 &lruvec->lru_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem lock#5 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle lock#4 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle lock#5 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem 
&sbi->s_md_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &ei->i_prealloc_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem key#3 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &pa->pa_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) key#15 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &memcg->move_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#7 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#7 &s->s_inode_wblist_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &____s->seqcount#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &____s->seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock key#10 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#7 key#10 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)writeback 
(work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_raw_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem tk_core.seq.seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &dd->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock &dd->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &bdi->wb_waitq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &bdi->wb_waitq &p->pi_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &bdi->wb_waitq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &bdi->wb_waitq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 &journal->j_state_lock &journal->j_wait_transaction_locked &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &pa->pa_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rnp->exp_wq[3] irq_context: 0 key#22 irq_context: 0 sk_lock-AF_INET6 clock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 &sctp_port_hashtable[i].lock irq_context: 0 sk_lock-AF_INET6 &sctp_port_hashtable[i].lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 &sctp_port_hashtable[i].lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 &sctp_port_hashtable[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &sctp_port_hashtable[i].lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock 
&rcu_state.expedited_wq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rhashtable_bucket irq_context: 0 sk_lock-AF_INET6 &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 &base->lock irq_context: 0 sk_lock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 &asoc->wait irq_context: 0 sk_lock-AF_INET6 &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock stock_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock key#23 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &n->list_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock remove_cache_srcu irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock remove_cache_srcu quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock remove_cache_srcu &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock remove_cache_srcu &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 &f->f_pos_lock 
sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock remove_cache_srcu pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock remove_cache_srcu &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &meta->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kfence_freelist_lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override &c->lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 pernet_ops_rwsem nf_hook_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem nf_hook_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 stock_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sb->s_type->i_mutex_key#8 &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_INET6 krc.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem ptlock_ptr(page)#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &mapping->private_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock rcu_node_0 irq_context: 0 
&sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &memcg->move_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#7 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#7 &s->s_inode_wblist_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &pcp->lock &zone->lock irq_context: 0 sk_lock-AF_NETLINK &rq->__lock irq_context: 0 sk_lock-AF_NETLINK &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 batched_entropy_u8.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 kfence_freelist_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &ei->i_es_lock key#2 irq_context: softirq rcu_callback key#23 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem 
rtnl_mutex batched_entropy_u8.lock crngs.lock irq_context: 0 sk_lock-AF_INET6 sctp_assocs_id_lock irq_context: 0 sk_lock-AF_INET6 sctp_assocs_id_lock &c->lock irq_context: 0 sk_lock-AF_INET6 sctp_assocs_id_lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 sctp_assocs_id_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 sctp_assocs_id_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &list->lock#25 irq_context: 0 sk_lock-AF_INET6 krc.lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 krc.lock &base->lock irq_context: 0 sk_lock-AF_INET6 krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults &journal->j_state_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults jbd2_handle irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults jbd2_handle &ei->i_raw_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_wait_updates irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults &rq->__lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &memcg->move_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &xa->xa_lock#7 irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &mapping->private_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock &mapping->private_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 slock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rlock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &list->lock#24 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 
&rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &list->lock#25 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 krc.lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rhashtable_bucket irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 sctp_assocs_id_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 sctp_assocs_id_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 sctp_assocs_id_lock &obj_hash[i].lock pool_lock 
irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 sctp_assocs_id_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 &sctp_ep_hashtable[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 krc.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 &sctp_port_hashtable[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 &sctp_port_hashtable[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 &sctp_port_hashtable[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 clock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->softif_vlan_list_lock &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->softif_vlan_list_lock &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &meta->lock irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu irq_context: 0 sk_lock-AF_INET6 
remove_cache_srcu &rq->__lock irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu quarantine_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 tracepoints_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 tracepoints_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex &ACCESS_PRIVATE(sdp, lock) irq_context: 0 tracepoints_mutex tracepoint_srcu irq_context: 0 tracepoints_mutex &x->wait#3 irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &____s->seqcount#2 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) tracepoint_srcu_srcu_usage.lock &ACCESS_PRIVATE(sdp, lock) irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex fill_pool_map-wait-type-override &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex subsys mutex#17 &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex subsys mutex#17 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock 
&rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu &c->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &folio_wait_table[i] &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &folio_wait_table[i] &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 lock#5 &lruvec->lru_lock irq_context: 0 sk_lock-AF_INET6 batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 kfence_freelist_lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_query_lock fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_query_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_query_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_query_lock fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_query_lock fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_query_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_query_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_query_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 
&sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 krc.lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 krc.lock &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 krc.lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 lock#5 &lruvec->lru_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) key#15 irq_context: 0 pernet_ops_rwsem rtnl_mutex pool_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 lock#5 &lruvec->lru_lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work rcu_node_0 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) &meta->lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) kfence_freelist_lock irq_context: 0 cb_lock genl_mutex rcu_node_0 irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 &mapping->private_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock &rq->__lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex gdp_mutex &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &mapping->private_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &group->notification_waitq &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rcu_state.expedited_wq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &ei->xattr_sem irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#4 
&sb->s_type->i_mutex_key#8 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock rcu_read_lock pool_lock#2 irq_context: 0 lock#5 &lruvec->lru_lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &rnp->exp_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &rnp->exp_wq[2] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_lock_key &xa->xa_lock#7 &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rnp->exp_wq[3] irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq irq_context: 0 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_hotplug_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex 
dev_hotplug_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &sem->wait_lock irq_context: 0 kn->active#5 remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex fs_reclaim &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &lock->wait_lock irq_context: 0 &f->f_pos_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock &cfs_rq->removed.lock irq_context: 0 &xt[i].mutex batched_entropy_u8.lock irq_context: 0 &xt[i].mutex kfence_freelist_lock irq_context: 0 &xt[i].mutex &meta->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &mapping->private_lock irq_context: 0 &f->f_pos_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock pool_lock#2 irq_context: 0 &sb->s_type->i_lock_key &xa->xa_lock#7 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &mapping->private_lock rcu_read_lock rcu_read_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 sb_internal 
remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &pcp->lock &zone->lock irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock sb_pagefaults quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sem->wait_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sem->wait_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG fs_reclaim irq_context: 0 sk_lock-AF_ALG fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_ALG pool_lock#2 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock irq_context: 0 sk_lock-AF_ALG &obj_hash[i].lock irq_context: 0 sk_lock-AF_ALG &c->lock irq_context: 0 sk_lock-AF_ALG &dir->lock irq_context: 0 sk_lock-AF_ALG &____s->seqcount irq_context: 0 sk_lock-AF_ALG &ei->socket.wq.wait irq_context: 0 sb_writers#4 &meta->lock irq_context: 0 sb_writers#4 kfence_freelist_lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock &ei->socket.wq.wait irq_context: 0 sk_lock-AF_ALG rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG slock-AF_ALG &sk->sk_lock.wq irq_context: 0 sk_lock-AF_ALG rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_ALG &rq->__lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 slock-AF_ALG &sk->sk_lock.wq irq_context: 0 slock-AF_ALG &sk->sk_lock.wq &p->pi_lock irq_context: 0 slock-AF_ALG &sk->sk_lock.wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 slock-AF_ALG &sk->sk_lock.wq &p->pi_lock &rq->__lock irq_context: 0 slock-AF_ALG &sk->sk_lock.wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults tk_core.seq.seqcount irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults pool_lock#2 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &journal->j_state_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults jbd2_handle irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults jbd2_handle &ei->i_raw_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &obj_hash[i].lock 
irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &memcg->move_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &xa->xa_lock#7 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &mapping->private_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock &mapping->private_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &xa->xa_lock#7 key#10 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock &folio_wait_table[i] irq_context: 0 sk_lock-AF_ALG rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_wait_updates irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &n->list_lock irq_context: 0 sk_lock-AF_ALG &n->list_lock &c->lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &rq->__lock &cfs_rq->removed.lock irq_context: 0 &xt[i].mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &xt[i].mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &xt[i].mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&n->timer) &n->list_lock 
irq_context: softirq (&n->timer) &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 rcu_node_0 irq_context: 0 pernet_ops_rwsem rcu_read_lock pgd_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock stock_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock key irq_context: 0 pernet_ops_rwsem rcu_read_lock pcpu_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock percpu_counters_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock pcpu_lock stock_lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock pool_lock#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &journal->j_revoke_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &bgl->locks[i].lock irq_context: 0 unix_gc_lock irq_context: 0 &sb->s_type->i_mutex_key#10 unix_gc_lock irq_context: 0 &sb->s_type->i_mutex_key#10 unix_gc_lock rlock-AF_UNIX irq_context: 0 &sb->s_type->i_mutex_key#10 unix_gc_lock unix_gc_wait.lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex rcu_read_lock nl_table_lock irq_context: 0 rtnl_mutex rcu_read_lock nl_table_wait.lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &xa->xa_lock#3 irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &xa->xa_lock#3 pool_lock#2 irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 stock_lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rtnl_mutex _xmit_ETHER/2 irq_context: 0 rtnl_mutex (work_completion)(&(&slave->notify_work)->work) irq_context: 0 rtnl_mutex (&hsr->prune_timer) irq_context: 0 rtnl_mutex (&hsr->announce_timer) irq_context: 0 rtnl_mutex &dentry->d_lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 pin_fs_lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 mount_lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 mount_lock mount_lock.seqcount irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &fsnotify_mark_srcu irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &xa->xa_lock#7 irq_context: 0 rtnl_mutex rcu_read_lock &dentry->d_lock irq_context: 0 rtnl_mutex &fsnotify_mark_srcu irq_context: 0 rtnl_mutex &sb->s_type->i_lock_key#7 irq_context: 0 rtnl_mutex &s->s_inode_list_lock irq_context: 0 
rtnl_mutex &xa->xa_lock#7 irq_context: 0 rtnl_mutex rcu_read_lock batched_entropy_u8.lock irq_context: 0 rtnl_mutex rcu_read_lock kfence_freelist_lock irq_context: 0 rtnl_mutex rcu_read_lock &meta->lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER/2 irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER/2 &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER/2 &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER/2 pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER/2 krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex krc.lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex remove_cache_srcu irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &ACCESS_PRIVATE(ssp->srcu_sup, lock) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem proc_inum_ida.xa_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem proc_inum_ida.xa_lock pool_lock#2 irq_context: 0 (wq_completion)events fqdir_free_work &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu &____s->seqcount irq_context: 0 &pool->lock/1 &x->wait#10 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &rnp->exp_wq[2] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_state_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&s->destroy_work) &base->lock irq_context: 0 (wq_completion)events (work_completion)(&s->destroy_work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_state_lock 
&journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &bgl->locks[i].lock irq_context: 0 remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock irq_context: 0 remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_ALG sk_lock-AF_ALG/1 irq_context: 0 sk_lock-AF_ALG sk_lock-AF_ALG/1 slock-AF_ALG irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ei->i_prealloc_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &mapping->private_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock &journal->j_list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &pa->pa_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &lg->lg_prealloc_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &c->lock irq_context: 0 namespace_sem rcu_read_lock &rq->__lock irq_context: 0 namespace_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &wq->mutex &pool->lock/1 irq_context: 0 rtnl_mutex &wq->mutex &x->wait#10 irq_context: 0 rtnl_mutex &pool->lock/1 irq_context: 0 rtnl_mutex &pool->lock/1 rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &pool->lock/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &pool->lock/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 remove_cache_srcu rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock key#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_es_lock key#6 irq_context: 0 &sb->s_type->i_mutex_key#8 &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &rq->__lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rcu_state.gp_wq irq_context: 0 
&sb->s_type->i_mutex_key#10 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_node_0 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &rcu_state.expedited_wq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &block->lock fs_reclaim irq_context: 0 rtnl_mutex &block->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &block->lock &c->lock irq_context: 0 rtnl_mutex &block->lock pool_lock#2 irq_context: 0 rtnl_mutex &chain->filter_chain_lock irq_context: 0 rtnl_mutex cls_mod_lock irq_context: 0 rtnl_mutex &chain->filter_chain_lock &block->lock irq_context: 0 rtnl_mutex &chain->filter_chain_lock &block->proto_destroy_lock irq_context: 0 rtnl_mutex &block->proto_destroy_lock irq_context: 0 rtnl_mutex &block->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &block->lock nl_table_lock irq_context: 0 rtnl_mutex &block->lock nl_table_wait.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex text_mutex.wait_lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex.wait_lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex &p->pi_lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex &p->pi_lock &rq->__lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &n->list_lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &n->list_lock &c->lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock batched_entropy_u8.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock kfence_freelist_lock irq_context: 0 &type->i_mutex_dir_key#3 remove_cache_srcu &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex uevent_sock_mutex.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#5 
&type->i_mutex_dir_key#5/1 rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &rcu_state.expedited_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex remove_cache_srcu irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex remove_cache_srcu quarantine_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex remove_cache_srcu &c->lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex remove_cache_srcu &n->list_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &n->list_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work &rnp->exp_wq[1] irq_context: softirq (&peer->timer_send_keepalive) &n->list_lock irq_context: softirq (&peer->timer_send_keepalive) &n->list_lock &c->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock rcu_node_0 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 bcm_notifier_lock irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &base->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 
&sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sighand->siglock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 bcm_notifier_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_CAN irq_context: 0 &sb->s_type->i_mutex_key#10 elock-AF_CAN irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rnp->exp_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) pool_lock#2 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 sk_lock-AF_ALG rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &n->list_lock 
irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &____s->seqcount#2 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &____s->seqcount irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &c->lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_node_0 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock rcu_node_0 irq_context: 0 &fsnotify_mark_srcu &group->notification_waitq &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &meta_group_info[i]->alloc_sem &bgl->locks[i].lock irq_context: 0 &xt[i].mutex &base->lock irq_context: 0 &xt[i].mutex &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 l2tp_ip_lock irq_context: 0 &xt[i].mutex &mm->mmap_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 sb_internal rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_es_lock key#6 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 file_rwsem &rq->__lock irq_context: 0 file_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &c->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &n->list_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &n->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock nl_table_lock irq_context: 0 
(wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock nl_table_wait.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock rcu_read_lock id_table_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &dir->lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock krc.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &meta->lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex rcu_read_lock rcu_node_0 irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex rcu_read_lock &rq->__lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss &n->list_lock &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex stock_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex shrinker_rwsem &rq->__lock irq_context: 0 &type->i_mutex_dir_key#7 &c->lock irq_context: 0 &type->i_mutex_dir_key#7 &n->list_lock irq_context: 0 &type->i_mutex_dir_key#7 &n->list_lock &c->lock irq_context: 0 &type->i_mutex_dir_key#7 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &n->list_lock &c->lock irq_context: 0 sb_writers#8 tomoyo_ss pool_lock#2 irq_context: 0 
kn->active#5 rcu_read_lock rcu_node_0 irq_context: 0 kn->active#5 rcu_read_lock &rq->__lock irq_context: 0 kn->active#5 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 l2tp_ip_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle kfence_freelist_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock pool_lock#2 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock rcu_node_0 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &dir->lock#2 irq_context: 0 rtnl_mutex &tb->tb6_lock stock_lock irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock &n->list_lock irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_node_0 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem batched_entropy_u8.lock 
irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem kfence_freelist_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &meta->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 &sbi->s_writepages_rwsem &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_reserved irq_context: 0 sb_writers#4 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 quarantine_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle key#4 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 &xt[i].mutex fs_reclaim &rq->__lock irq_context: 0 &xt[i].mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &rq->__lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &n->list_lock &c->lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 
mapping.invalidate_lock &meta->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock kfence_freelist_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cgroup_mutex.wait_lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &n->list_lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &n->list_lock &c->lock irq_context: 0 cb_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex fs_reclaim &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex.wait_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &p->pi_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &sem->wait_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &rq->__lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex ima_extend_list_mutex &rq->__lock irq_context: 0 &iint->mutex ima_extend_list_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock batched_entropy_u8.lock crngs.lock irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_es_lock key#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &____s->seqcount irq_context: 0 &sighand->siglock &c->lock irq_context: 0 cb_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 cb_lock rcu_read_lock &____s->seqcount irq_context: 0 
&f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount#2 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &rq->__lock irq_context: 0 tasklist_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sb->s_type->i_mutex_key#8 batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#8 kfence_freelist_lock irq_context: 0 tracepoints_mutex &obj_hash[i].lock pool_lock irq_context: 0 proto_tab_lock irq_context: 0 proto_tab_lock pool_lock#2 irq_context: 0 proto_tab_lock &dir->lock irq_context: 0 proto_tab_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_NFC irq_context: 0 sk_lock-AF_NFC slock-AF_NFC irq_context: 0 sk_lock-AF_NFC &k->list_lock irq_context: 0 sk_lock-AF_NFC &k->k_lock irq_context: 0 sk_lock-AF_NFC llcp_devices_lock irq_context: 0 sk_lock-AF_NFC fs_reclaim irq_context: 0 sk_lock-AF_NFC fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_NFC pool_lock#2 irq_context: 0 sk_lock-AF_NFC &local->sdp_lock irq_context: 0 sk_lock-AF_NFC &local->sdp_lock &local->sockets.lock irq_context: 0 sk_lock-AF_NFC &local->sdp_lock &rq->__lock irq_context: 0 sk_lock-AF_NFC &local->sdp_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_NFC &local->sockets.lock irq_context: 0 slock-AF_NFC irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NFC irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NFC slock-AF_NFC irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NFC &local->sockets.lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_NFC irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_NFC irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_NFC irq_context: 0 &sb->s_type->i_mutex_key#10 &list->lock#26 irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &cfs_rq->removed.lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &pcp->lock &zone->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sbi->s_md_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &ei->i_prealloc_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem key#3 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &pa->pa_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) key#15 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem 
&sb->s_type->i_lock_key#22 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &____s->seqcount#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#7 key#10 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock key#10 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_raw_lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#7 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 sk_lock-AF_NFC &rq->__lock irq_context: 0 proto_tab_lock &c->lock irq_context: 0 sk_lock-AF_NFC &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex &n->list_lock &c->lock irq_context: 0 misc_mtx &n->list_lock irq_context: 0 misc_mtx &n->list_lock &c->lock irq_context: 0 &vma->vm_lock->lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &tn->lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex bus_type_sem &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex bus_type_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&app->join_timer) batched_entropy_u32.lock crngs.lock base_crng.lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem &rnp->exp_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 tracepoints_mutex tracepoints_mutex.wait_lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 tracepoints_mutex &p->pi_lock irq_context: 0 tracepoints_mutex &p->pi_lock &rq->__lock irq_context: 0 tracepoints_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex.wait_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex lock kernfs_idr_lock &n->list_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex lock 
kernfs_idr_lock &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &cfs_rq->removed.lock irq_context: 0 &iint->mutex &p->alloc_lock irq_context: 0 &iint->mutex &list->lock irq_context: 0 &iint->mutex kauditd_wait.lock irq_context: 0 &iint->mutex kauditd_wait.lock &p->pi_lock irq_context: 0 &iint->mutex kauditd_wait.lock &p->pi_lock &rq->__lock irq_context: 0 &iint->mutex kauditd_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#7 &dentry->d_lock &wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &n->list_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &hashinfo->lock#2 irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_node_0 irq_context: 0 &xt[i].mutex remove_cache_srcu &rq->__lock irq_context: 0 &xt[i].mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rcu_state.exp_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 rcu_state.exp_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &dentry->d_lock &dentry->d_lock &lru->node[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem inode_hash_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem fs_reclaim irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem stock_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#31 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem &sb->s_type->i_lock_key#31 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &sb->s_type->i_lock_key#31 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &sb->s_type->i_lock_key#31 &dentry->d_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &mm->mmap_lock 
remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_unbound (reaper_work).work fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events_unbound (reaper_work).work fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events_unbound (reaper_work).work fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem remove_cache_srcu rcu_read_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &table->hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &table->hash[i].lock &table->hash2[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &xa->xa_lock#7 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &fsnotify_mark_srcu irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-clock-AF_RXRPC irq_context: 0 pernet_ops_rwsem (wq_completion)krxrpcd irq_context: 0 pernet_ops_rwsem &wq->mutex irq_context: 0 pernet_ops_rwsem &wq->mutex &pool->lock/1 irq_context: 0 pernet_ops_rwsem &wq->mutex &x->wait#10 irq_context: 0 pernet_ops_rwsem rlock-AF_RXRPC irq_context: 0 pernet_ops_rwsem (&net->fs_probe_timer) irq_context: 0 pernet_ops_rwsem &net->cells_lock irq_context: 0 pernet_ops_rwsem (&net->cells_timer) 
irq_context: 0 pernet_ops_rwsem bit_wait_table + i irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#7 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#7 fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem (&net->fs_timer) irq_context: 0 pernet_ops_rwsem ovs_mutex irq_context: 0 pernet_ops_rwsem ovs_mutex (work_completion)(&data->gc_work) irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex stock_lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex &c->lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex nf_hook_mutex irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex nf_hook_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex nf_hook_mutex irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex nf_hook_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem ovs_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem ovs_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem ovs_mutex nf_connlabels_lock irq_context: 0 pernet_ops_rwsem ovs_mutex net_rwsem irq_context: 0 pernet_ops_rwsem (work_completion)(&(&ovs_net->masks_rebalance)->work) irq_context: 0 pernet_ops_rwsem (work_completion)(&ovs_net->dp_notify_work) irq_context: 0 pernet_ops_rwsem &srv->idr_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 pernet_ops_rwsem &pool->lock/1 irq_context: 0 pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events 
(work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &mapping->private_lock rcu_read_lock &p->pi_lock irq_context: 0 sb_writers#4 &mapping->private_lock rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &mapping->private_lock rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem (&rxnet->peer_keepalive_timer) irq_context: 0 pernet_ops_rwsem (work_completion)(&rxnet->peer_keepalive_work) irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock krc.lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &nt->cluster_scope_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock krc.lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC rcu_read_lock rhashtable_bucket irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC k-clock-AF_TIPC irq_context: 0 pernet_ops_rwsem (work_completion)(&tn->work) irq_context: 0 pernet_ops_rwsem &tn->nametbl_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_node_0 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem (work_completion)(&(&c->work)->work) irq_context: 0 sb_writers#4 &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem (wq_completion)krdsd irq_context: 0 pernet_ops_rwsem (work_completion)(&rtn->rds_tcp_accept_w) irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &queue->rskq_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 elock-AF_INET6 irq_context: 0 pernet_ops_rwsem rds_tcp_conn_lock irq_context: 0 pernet_ops_rwsem loop_conns_lock irq_context: 0 pernet_ops_rwsem (wq_completion)l2tp irq_context: 0 sk_lock-AF_INET6 tcpv6_prot_mutex irq_context: 0 sk_lock-AF_INET6 tcpv6_prot_mutex rcu_node_0 irq_context: 0 sk_lock-AF_INET6 tcpv6_prot_mutex &rq->__lock irq_context: 0 sk_lock-AF_INET6 tcpv6_prot_mutex &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 device_spinlock irq_context: 0 sk_lock-AF_INET6 clock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 clock-AF_INET6 pool_lock#2 irq_context: 0 sk_lock-AF_INET6 crypto_alg_sem irq_context: 0 sk_lock-AF_INET6 (kmod_concurrent_max).lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock/1 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 &x->wait#17 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu pool_lock#2 irq_context: 0 pernet_ops_rwsem (&rxnet->service_conn_reap_timer) irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex &rnp->exp_lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex irq_context: 0 tracepoints_mutex rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ei->i_prealloc_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &pa->pa_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &mapping->private_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock &journal->j_list_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &pa->pa_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &lg->lg_prealloc_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 pernet_ops_rwsem rtnl_mutex _xmit_NONE irq_context: 0 sb_writers#4 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 running_helpers_waitq.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &folio_wait_table[i] irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) quarantine_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_node_0 irq_context: 0 namespace_sem remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_node_0 irq_context: 0 sk_lock-AF_INET6 crypto_alg_sem irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &vma->vm_lock->lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 lock link_idr_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 cb_lock genl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock stock_lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock genl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &rcu_state.gp_wq 
&p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&w->w) &base->lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) &base->lock &obj_hash[i].lock irq_context: 0 k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem fs_reclaim irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem &c->lock irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem pool_lock#2 irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem kthread_create_lock irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem &p->pi_lock irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem &x->wait irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem &rq->__lock irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &cfs_rq->removed.lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &rcu_state.expedited_wq irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem rtnl_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex net_rwsem rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex net_rwsem &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem rtnl_mutex net_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex net_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex net_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 &x->wait#21 irq_context: 0 k-slock-AF_INET6 rcu_read_lock &c->lock irq_context: 0 k-slock-AF_INET6 rcu_read_lock pool_lock#2 
irq_context: 0 k-slock-AF_INET6 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 k-slock-AF_INET6 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 k-slock-AF_INET6 rcu_read_lock rcu_read_lock &c->lock irq_context: 0 k-slock-AF_INET6 rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 k-slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 k-slock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock irq_context: 0 k-slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_node_0 irq_context: 0 rcu_read_lock &sighand->siglock &n->list_lock irq_context: 0 rcu_read_lock &sighand->siglock &n->list_lock &c->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &xa->xa_lock#7 &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 (&timer.timer) irq_context: 0 &sb->s_type->i_mutex_key#10 krc.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle bit_wait_table + i irq_context: 0 k-slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 sb_writers#8 remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 clock-AF_INET6 &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &sem->wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx 
&p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock &net->ipv6.fib6_walker_lock irq_context: 0 rtnl_mutex rcu_read_lock mfc_unres_lock irq_context: 0 rtnl_mutex rcu_read_lock mfc_unres_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex rcu_node_0 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &wg->device_update_lock irq_context: 0 &wg->device_update_lock &wg->socket_update_lock irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex &rq->__lock irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &wg->device_update_lock &obj_hash[i].lock irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 &wg->device_update_lock &wq->mutex irq_context: 0 &wg->device_update_lock pcpu_lock irq_context: 0 &wg->device_update_lock &wq->mutex &pool->lock irq_context: 0 &wg->device_update_lock &wq->mutex &x->wait#10 irq_context: 0 &wg->device_update_lock wq_pool_mutex irq_context: 0 &wg->device_update_lock wq_pool_mutex &rq->__lock irq_context: 0 &wg->device_update_lock wq_pool_mutex &wq->mutex irq_context: 0 &wg->device_update_lock wq_pool_mutex &wq->mutex &pool->lock irq_context: 0 &wg->device_update_lock &rq->__lock irq_context: 0 &wg->device_update_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 &wg->device_update_lock pool_lock#2 irq_context: 0 &wg->device_update_lock rcu_state.barrier_mutex.wait_lock irq_context: 0 &wg->device_update_lock init_lock irq_context: 0 &wg->device_update_lock &zone->lock irq_context: 0 &wg->device_update_lock 
&wq->mutex &pool->lock/1 irq_context: 0 &wg->device_update_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 &wg->device_update_lock &pool->lock/1 irq_context: 0 &wg->device_update_lock &pool->lock/1 rcu_read_lock &pool->lock irq_context: 0 &wg->device_update_lock &pool->lock/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &wg->device_update_lock &pool->lock/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &wg->device_update_lock &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &wg->device_update_lock &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &wg->device_update_lock &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &wg->device_update_lock &zone->lock &____s->seqcount irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (work_completion)(&(&sw_ctx_tx->tx_work.work)->work) irq_context: 0 &wg->device_update_lock wq_mayday_lock irq_context: 0 &wg->device_update_lock &p->pi_lock irq_context: 0 &wg->device_update_lock &p->pi_lock &rq->__lock irq_context: 0 &wg->device_update_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &wg->device_update_lock &x->wait irq_context: 0 &wg->device_update_lock &r->consumer_lock#2 irq_context: 0 &wg->device_update_lock rcu_read_lock pool_lock#2 irq_context: 0 &wg->device_update_lock rcu_state.barrier_mutex irq_context: 0 &wg->device_update_lock rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 &wg->device_update_lock rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 &wg->device_update_lock rcu_state.barrier_mutex &x->wait#24 irq_context: 0 &wg->device_update_lock rcu_state.barrier_mutex &rq->__lock irq_context: 0 &wg->device_update_lock rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &rnp->exp_wq[3] irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[3] &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &sw_ctx_tx->encrypt_compl_lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] &n->list_lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 sb_writers#8 tomoyo_ss batched_entropy_u8.lock irq_context: 0 sb_writers#8 tomoyo_ss kfence_freelist_lock irq_context: 0 sb_writers#8 tomoyo_ss &meta->lock irq_context: 0 sb_writers#8 iattr_mutex rcu_node_0 irq_context: 0 sb_writers#8 iattr_mutex &rcu_state.expedited_wq irq_context: 
0 sb_writers#8 iattr_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#8 iattr_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 iattr_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 iattr_mutex &rq->__lock irq_context: 0 sb_writers#8 iattr_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &pnn->pndevs.lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &pnn->pndevs.lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &____s->seqcount irq_context: 0 pernet_ops_rwsem sysctl_lock krc.lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem sysctl_lock krc.lock &base->lock irq_context: 0 pernet_ops_rwsem sysctl_lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem proc_inum_ida.xa_lock &obj_hash[i].lock irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu quarantine_lock irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu &c->lock irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu &n->list_lock irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu &obj_hash[i].lock irq_context: 0 &pipe->rd_wait &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: 0 &pipe->rd_wait &p->pi_lock &rq->__lock &base->lock irq_context: 0 &pipe->rd_wait &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &ei->i_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &pa->pa_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex 
&mapping->private_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock &journal->j_list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &pa->pa_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &lg->lg_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &base->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#23/1 &xa->xa_lock#3 &n->list_lock irq_context: 0 &type->s_umount_key#23/1 &xa->xa_lock#3 &n->list_lock &c->lock irq_context: 0 &rdma_nl_types[idx].sem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem fs_reclaim irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &x->wait#9 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem (console_sem).lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &pdata->netdev_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &pdata->netdev_lock &____s->seqcount irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &pdata->netdev_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &pdata->netdev_lock rcu_read_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &pdata->netdev_lock &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &pdata->netdev_lock &dir->lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem ndev_hash_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem crypto_alg_sem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem crypto_alg_sem &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem fs_reclaim irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem devices.xa_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock &pdata->netdev_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock rtnl_mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock (console_sem).lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock console_lock console_srcu console_owner_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock console_lock console_srcu console_owner irq_context: 0 
&rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 fs_reclaim irq_context: 0 sb_writers#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 mapping.invalidate_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 mapping.invalidate_lock &____s->seqcount irq_context: 0 sb_writers#4 mapping.invalidate_lock pool_lock#2 irq_context: 0 sb_writers#4 mapping.invalidate_lock stock_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &xa->xa_lock#7 irq_context: 0 sb_writers#4 mapping.invalidate_lock lock#4 irq_context: 0 sb_writers#4 mapping.invalidate_lock &xa->xa_lock#7 stock_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &xa->xa_lock#7 pool_lock#2 irq_context: 0 sb_writers#4 mapping.invalidate_lock lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &ei->i_es_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 mapping.invalidate_lock &dd->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &dd->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 sb_writers#4 &folio_wait_table[i] irq_context: 0 sb_writers#4 sb_writers#4 mount_lock irq_context: 0 sb_writers#4 sb_writers#4 tk_core.seq.seqcount irq_context: 0 sb_writers#4 sb_writers#4 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 sb_writers#4 pool_lock#2 irq_context: 0 sb_writers#4 sb_writers#4 &journal->j_state_lock irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 sb_writers#4 &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &dd->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &dd->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &base->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 (&timer.timer) irq_context: 0 &sighand->siglock batched_entropy_u8.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 
sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &ret->b_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &ret->b_state_lock &journal->j_list_lock irq_context: 0 &type->s_umount_key#23/1 pcpu_alloc_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex fs_reclaim &rq->__lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_wait_done_commit &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &c->lock irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &pcp->lock &zone->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 stock_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &xa->xa_lock#7 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem pool_lock#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &memcg->move_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#7 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#7 &s->s_inode_wblist_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#7 key#10 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &xa->xa_lock#7 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem lock#4 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem lock#5 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem tk_core.seq.seqcount irq_context: 0 sb_writers#4 
&sbi->s_writepages_rwsem &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &dd->lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &dd->lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &virtscsi_vq->vq_lock irq_context: softirq rcu_read_lock &xa->xa_lock#7 &pl->lock irq_context: softirq rcu_read_lock &xa->xa_lock#7 &pl->lock key#12 irq_context: 0 sb_writers#4 mapping.invalidate_lock &rq->__lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem proc_inum_ida.xa_lock &c->lock irq_context: 0 sb_writers#4 sb_writers#4 &rq->__lock irq_context: 0 sb_writers#4 sb_writers#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_writers#4 &journal->j_state_lock irq_context: 0 sb_writers#4 sb_writers#4 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 sb_writers#4 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_writers#4 &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 sb_writers#4 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &n->list_lock irq_context: 0 sb_writers#4 &n->list_lock &c->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &table->lock#4 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &table->lock#4 fs_reclaim irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &table->lock#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &table->lock#4 &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &table->lock#4 pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &table->lock#4 &n->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &table->lock#4 &n->list_lock &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &table->lock#4 &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &table->lock#4 
&table->rwlock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &table->lock#4 &device->event_handler_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem rcu_read_lock &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem rcu_read_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &ndev->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &ndev->lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex.wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock rtnl_mutex &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &journal->j_wait_commit irq_context: 0 sb_writers#4 &journal->j_wait_done_commit irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock rtnl_mutex.wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem gdp_mutex &root->kernfs_rwsem irq_context: 0 sb_writers tomoyo_ss &n->list_lock irq_context: 0 sb_writers tomoyo_ss &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#7 stock_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &device->cache_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rdmacg_mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &____s->seqcount irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rcu_read_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &k->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem gdp_mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem gdp_mutex &k->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem gdp_mutex fs_reclaim irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem gdp_mutex pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem gdp_mutex lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem gdp_mutex lock kernfs_idr_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem gdp_mutex 
&rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem gdp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex gdp_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem gdp_mutex kobj_ns_type_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem gdp_mutex.wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem lock kernfs_idr_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &root->kernfs_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem bus_type_sem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem sysfs_symlink_target_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &____s->seqcount#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &root->kernfs_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &dev->power.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem dpm_list_mtx irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem subsys mutex#83 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem subsys mutex#83 &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem subsys mutex#83 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle lock#4 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle lock#5 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sbi->s_md_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &pa->pa_lock#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 
&sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem subsys mutex#83 &k->k_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &memcg->move_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#7 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#7 &s->s_inode_wblist_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#7 key#10 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &zone->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem lock kernfs_idr_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &n->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &n->list_lock &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &pcp->lock &zone->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &root->kernfs_rwsem &sem->wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &root->kernfs_rwsem &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &sem->wait_lock irq_context: 0 pernet_ops_rwsem &xt[i].mutex &lock->wait_lock irq_context: 0 pernet_ops_rwsem &xt[i].mutex &rq->__lock irq_context: 0 pernet_ops_rwsem &xt[i].mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem lock kernfs_idr_lock &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rcu_read_lock rcu_node_0 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rcu_read_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rcu_node_0 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem fs_reclaim &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem 
remove_cache_srcu &rq->__lock irq_context: softirq &(&krcp->monitor_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem remove_cache_srcu irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem remove_cache_srcu quarantine_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem remove_cache_srcu &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem remove_cache_srcu &n->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem remove_cache_srcu &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &sem->wait_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &lock->wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem remove_cache_srcu pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rcu_state.expedited_wq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem lock kernfs_idr_lock &n->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem lock kernfs_idr_lock &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 mapping.invalidate_lock &xa->xa_lock#7 &c->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex 
dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &sem->wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &lock->wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &root->kernfs_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &c->lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &____s->seqcount irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle bit_wait_table + i irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_writers#4 rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock &q->requeue_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &x->wait#26 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rcu_read_lock &pool->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem 
devices_rwsem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)infiniband irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 fs_reclaim irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 pool_lock#2 irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock &pdata->netdev_lock irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock rtnl_mutex irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock rtnl_mutex &pool->lock irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem (console_sem).lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem console_lock console_srcu console_owner_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem console_lock console_srcu console_owner irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem console_lock console_srcu console_owner &port_lock_key irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock rtnl_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock rtnl_mutex.wait_lock irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock &p->pi_lock irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock (console_sem).lock irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock 
console_lock console_srcu console_owner irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 sk_lock-AF_INET &____s->seqcount#2 irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock rcu_node_0 irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock &rq->__lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem &pdata->netdev_lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem &table->lock#4 irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &device->cache_lock irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &obj_hash[i].lock irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &device->event_handler_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem &cfs_rq->removed.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem console_owner_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem console_owner irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#15 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#15 pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem fs_reclaim irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#16 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#16 &c->lock irq_context: 0 &rdma_nl_types[idx].sem 
link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#16 pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#17 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem crngs.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem free_vmap_area_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem vmap_area_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &____s->seqcount irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem init_mm.page_table_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem purge_vmap_area_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem purge_vmap_area_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem rcu_read_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#16 &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &x->wait#27 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem (console_sem).lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &x->wait#28 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem console_lock console_srcu console_owner_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem console_lock console_srcu console_owner irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem console_lock console_srcu console_owner &port_lock_key irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem console_lock console_srcu console_owner console_owner_lock irq_context: 0 rtnl_mutex lock kernfs_idr_lock &n->list_lock irq_context: 0 rtnl_mutex lock kernfs_idr_lock &n->list_lock &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem krc.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#15 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#15 &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#15 pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem 
&device->client_data_rwsem lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock kernfs_idr_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &root->kernfs_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &____s->seqcount#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock kernfs_idr_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem ib_mad_port_list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &root->kernfs_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &root->kernfs_rwsem pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &root->kernfs_rwsem rcu_read_lock rcu_node_0 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem kernfs_idr_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 fs_reclaim irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 crngs.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 &xa->xa_lock#17 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 &id_priv->qp_mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 &id_priv->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem 
clients_rwsem &device->client_data_rwsem lock#7 &xa->xa_lock#18 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 &xa->xa_lock#18 pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 &cm_id_priv->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 &cm_id_priv->lock &cm.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 &n->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 &n->list_lock &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 &xa->xa_lock#17 pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem umad_ida.xa_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &x->wait#9 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem chrdevs_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &k->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem gdp_mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem gdp_mutex &k->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem gdp_mutex fs_reclaim irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem gdp_mutex pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem gdp_mutex lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem gdp_mutex lock kernfs_idr_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem gdp_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem gdp_mutex &root->kernfs_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem bus_type_sem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem sysfs_symlink_target_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &dev->power.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem dpm_list_mtx irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem req_lock irq_context: 0 
&rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &x->wait#11 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex fs_reclaim irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex nl_table_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex nl_table_wait.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex.wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem 
&device->client_data_rwsem subsys mutex#84 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem subsys mutex#84 &k->k_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem rcu_read_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss &____s->seqcount#2 irq_context: 0 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &n->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &n->list_lock &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem rcu_node_0 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem pcpu_alloc_mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem pcpu_alloc_mutex pcpu_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uverbs_ida.xa_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem remove_cache_srcu irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem remove_cache_srcu quarantine_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem remove_cache_srcu &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem remove_cache_srcu &n->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem remove_cache_srcu &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem remove_cache_srcu pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem rcu_read_lock rcu_node_0 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#9 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 
0 &sb->s_type->i_mutex_key#9 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem subsys mutex#85 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem subsys mutex#85 &k->k_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#17 pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem subsys mutex#86 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem subsys mutex#86 &k->k_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem rds_ib_devices_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem ib_nodev_conns_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem smc_ib_devices.mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &device->event_handler_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &rcu_state.expedited_wq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &rxe->usdev_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &pnettable->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &pnettable->lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &pnettable->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex.wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_node_0 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem rcu_read_lock &pool->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem 
rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rcu_state.expedited_wq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &rxe->usdev_lock irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &rxe->usdev_lock &pdata->netdev_lock irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &rxe->usdev_lock rtnl_mutex irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &rxe->usdev_lock rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &rxe->usdev_lock rtnl_mutex &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &rxe->usdev_lock rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &rxe->usdev_lock rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &xa->xa_lock#15 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &xa->xa_lock#15 pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &x->wait#9 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &k->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex gdp_mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex gdp_mutex &k->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem 
&device->compat_devs_mutex &root->kernfs_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex bus_type_sem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &sem->wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex sysfs_symlink_target_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &dev->power.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex dpm_list_mtx irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex fs_reclaim irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex nl_table_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex subsys mutex#83 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex subsys mutex#83 &k->k_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem 
&device->compat_devs_mutex &rxe->usdev_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock rcu_node_0 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock &lock->wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock rcu_read_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &rxe->usdev_lock (console_sem).lock irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &rxe->usdev_lock console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &rxe->usdev_lock console_lock console_srcu console_owner irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &rxe->usdev_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &rxe->usdev_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &rxe->usdev_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &lock->wait_lock irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &table->rwlock irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) smc_lgr_list.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock &pdata->netdev_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock rtnl_mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock rtnl_mutex &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock (console_sem).lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock console_lock console_srcu console_owner_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock console_lock console_srcu 
console_owner irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock console_lock console_srcu console_owner &port_lock_key irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &n->list_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &zone->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem rdma_nets_rwsem rdma_nets_rwsem.wait_lock irq_context: 0 pernet_ops_rwsem rdma_nets_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem rdma_nets_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &____s->seqcount#2 irq_context: 0 &wg->device_update_lock rcu_state.barrier_mutex rcu_state.barrier_mutex.wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &n->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &n->list_lock &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu quarantine_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &n->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem 
&device->compat_devs_mutex remove_cache_srcu pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock rcu_node_0 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &wg->device_update_lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex batched_entropy_u8.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex kfence_freelist_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &pcp->lock &zone->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: softirq rcu_read_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock &____s->seqcount#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex &n->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex 
&root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#7 &wb->work_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#7 &wb->work_lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#7 &wb->work_lock &base->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#7 &wb->work_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#7 key#12 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#7 key#13 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &sem->wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim rcu_node_0 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim &rcu_state.expedited_wq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex subsys mutex#17 &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &p->pi_lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) &meta->lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) kfence_freelist_lock irq_context: softirq rcu_read_lock hwsim_radio_lock init_task.mems_allowed_seq.seqcount irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &base->lock &obj_hash[i].lock 
irq_context: softirq (&ndev->rs_timer) init_task.mems_allowed_seq.seqcount irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rdma_nets_rwsem rdma_nets_rwsem.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rdma_nets_rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rdma_nets_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem.wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rdma_nets_rwsem.wait_lock irq_context: 0 pernet_ops_rwsem &device->compat_devs_mutex irq_context: 0 pernet_ops_rwsem &device->compat_devs_mutex &xa->xa_lock#15 irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &sem->wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &device->compat_devs_mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &device->compat_devs_mutex &xa->xa_lock#15 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex fs_reclaim irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &sem->wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex nl_table_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock 
&ep->wq &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex nl_table_wait.lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem pgd_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem stock_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem key irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem pcpu_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem percpu_counters_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem pcpu_lock stock_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &cfs_rq->removed.lock irq_context: 0 &xt[i].mutex rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &sem->wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 remove_cache_srcu pool_lock#2 irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &sem->wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 &u->iolock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &meta->lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem kfence_freelist_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_node_0 irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &p->pi_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem &zone->lock irq_context: 0 pernet_ops_rwsem &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem quarantine_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem quarantine_lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock crngs.lock irq_context: 0 
pernet_ops_rwsem dev_pm_qos_sysfs_mtx irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &sem->wait_lock irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx &sem->wait_lock irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx &p->pi_lock irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 pernet_ops_rwsem subsys mutex#83 irq_context: 0 pernet_ops_rwsem subsys mutex#83 &k->k_lock irq_context: 0 pernet_ops_rwsem subsys mutex#83 &k->k_lock klist_remove_lock irq_context: 0 pernet_ops_rwsem &x->wait#9 irq_context: 0 pernet_ops_rwsem dpm_list_mtx irq_context: 0 pernet_ops_rwsem &dev->power.lock irq_context: 0 pernet_ops_rwsem deferred_probe_mutex irq_context: 0 pernet_ops_rwsem device_links_lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem gdp_mutex irq_context: 0 pernet_ops_rwsem &device->unregistration_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &zone->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem dev_pm_qos_sysfs_mtx irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem subsys mutex#83 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem subsys mutex#83 &k->k_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem subsys mutex#83 &k->k_lock klist_remove_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &x->wait#9 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem dpm_list_mtx irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->power.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem deferred_probe_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem device_links_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem mmu_notifier_invalidate_range_start irq_context: 0 
(wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem gdp_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &device->unregistration_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&hsr->announce_timer) rcu_read_lock &n->list_lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &meta->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem kfence_freelist_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&forw_packet_aggr->delayed_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 tomoyo_ss remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 tomoyo_ss remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: softirq rcu_read_lock hwsim_radio_lock &pcp->lock &zone->lock irq_context: softirq rcu_read_lock hwsim_radio_lock &pcp->lock &zone->lock &____s->seqcount irq_context: softirq (&ndev->rs_timer) &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex rcu_read_lock &stopper->lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex rcu_read_lock &stop_pi_lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex rcu_read_lock &stop_pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex rcu_read_lock &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex pool_lock#2 irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &root->kernfs_rwsem pgd_lock irq_context: 0 &root->kernfs_rwsem stock_lock irq_context: 0 &root->kernfs_rwsem rcu_read_lock pool_lock#2 irq_context: 0 &root->kernfs_rwsem key irq_context: 0 &root->kernfs_rwsem pcpu_lock irq_context: 0 &root->kernfs_rwsem percpu_counters_lock irq_context: 0 &root->kernfs_rwsem pcpu_lock stock_lock irq_context: 0 &root->kernfs_rwsem 
pool_lock#2 irq_context: 0 &sb->s_type->i_lock_key#16 irq_context: 0 &sb->s_type->i_lock_key#16 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#21 irq_context: 0 &sb->s_type->i_mutex_key#21 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#21 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#21 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#21 integrity_iint_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem console_lock console_srcu console_owner_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem console_lock console_srcu console_owner irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem console_lock console_srcu console_owner &port_lock_key irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem console_lock console_srcu console_owner console_owner_lock irq_context: 0 pernet_ops_rwsem lock kernfs_idr_lock &c->lock irq_context: 0 &iint->mutex rcu_read_lock rcu_node_0 irq_context: 0 &iint->mutex rcu_read_lock &rq->__lock irq_context: 0 &iint->mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 &iint->mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &iint->mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &iint->mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 tk_core.seq.seqcount irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 sb_writers#14 mount_lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 sb_writers#14 tk_core.seq.seqcount irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 sb_writers#14 &sb->s_type->i_lock_key#16 irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 sb_writers#14 &wb->list_lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 sb_writers#14 &wb->list_lock &sb->s_type->i_lock_key#16 irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 fs_reclaim irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 pool_lock#2 irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 hugetlb_lock irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key pool_lock#2 irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key &rq->__lock irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &resv_map->lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] hugetlb_lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] hugetlb_lock &____s->seqcount#2 irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &rq->__lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &lock->wait_lock 
irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] fs_reclaim irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] stock_lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] pool_lock#2 irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &anon_vma->rwsem irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &anon_vma->rwsem &mm->page_table_lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] ptlock_ptr(page) irq_context: 0 &mm->mmap_lock &lock->wait_lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &resv_map->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &resv_map->lock pool_lock#2 irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &nf_nat_locks[i] irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) devices_rwsem &pdata->netdev_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock nl_table_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock nl_table_wait.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock lock#8 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock id_table_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &dir->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock krc.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 rtnl_mutex &br->hash_lock &n->list_lock irq_context: 0 rtnl_mutex &br->hash_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)mld 
(work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 nfnl_grp_active_lock irq_context: 0 &mm->mmap_lock sb_writers#4 &sb->s_type->i_lock_key#22 irq_context: 0 &mm->mmap_lock sb_writers#4 &wb->list_lock irq_context: 0 &mm->mmap_lock sb_writers#4 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 &mm->mmap_lock sb_writers#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &mm->mmap_lock &obj_hash[i].lock irq_context: 0 &sighand->siglock &____s->seqcount#2 irq_context: 0 &sighand->siglock &____s->seqcount irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key &c->lock irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/1 &bridge_netdev_addr_lock_key irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 &bridge_netdev_addr_lock_key irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &xa->xa_lock#15 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &x->wait#9 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &k->list_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex gdp_mutex irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex gdp_mutex &k->list_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex bus_type_sem irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex sysfs_symlink_target_lock irq_context: 0 pernet_ops_rwsem 
devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &c->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &dev->power.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex dpm_list_mtx irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex nl_table_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex subsys mutex#83 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex subsys mutex#83 &k->k_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock &pdata->netdev_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock rtnl_mutex irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock rtnl_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &br->multicast_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond2 irq_context: 0 (wq_completion)bond2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 &bridge_netdev_addr_lock_key irq_context: 0 rtnl_mutex rcu_read_lock &bond->ipsec_lock irq_context: 0 &sb->s_type->i_mutex_key#10 nfnl_grp_active_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &q->instances_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &log->instances_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex irq_context: softirq &(&bond->mcast_work)->timer irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock &bridge_netdev_addr_lock_key irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock 
&bridge_netdev_addr_lock_key pool_lock#2 irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock &obj_hash[i].lock pool_lock irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key ptlock_ptr(page) irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key &rq->__lock irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock hugetlb_lock irq_context: 0 &mm->mmap_lock &resv_map->lock irq_context: 0 rtnl_mutex remove_cache_srcu &____s->seqcount irq_context: 0 &mm->mmap_lock &resv_map->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &resv_map->lock pool_lock#2 irq_context: 0 &resv_map->lock irq_context: 0 integrity_iint_lock 
irq_context: 0 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rcu_read_lock &sighand->siglock &____s->seqcount#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 &iint->mutex rcu_node_0 irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &____s->seqcount irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &mm->page_table_lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &c->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock rtnl_mutex.wait_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock &p->pi_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock (console_sem).lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock console_lock console_srcu console_owner_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock console_lock console_srcu console_owner irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &zone->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock &c->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem 
devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#21 &c->lock irq_context: 0 rtnl_mutex &br->multicast_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex &bond->mode_lock irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &tbl->lock pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &tbl->lock batched_entropy_u32.lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &tbl->lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &tbl->lock &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock &n->list_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex &net->xfrm.xfrm_state_lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 krc.lock irq_context: 0 rtnl_mutex &bond->mode_lock &c->lock irq_context: 0 rtnl_mutex &bond->mode_lock pool_lock#2 irq_context: 0 rtnl_mutex &bond->mode_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 rtnl_mutex &bond->mode_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 rtnl_mutex &bond->mode_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex remove_cache_srcu pool_lock#2 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &sem->wait_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex (&br->hello_timer) irq_context: 0 rtnl_mutex (&br->topology_change_timer) irq_context: 0 rtnl_mutex (&br->tcn_timer) irq_context: 0 rtnl_mutex (work_completion)(&(&br->gc_work)->work) irq_context: 0 rtnl_mutex (&brmctx->ip4_mc_router_timer) irq_context: 0 rtnl_mutex (&brmctx->ip4_other_query.timer) irq_context: 0 rtnl_mutex (&brmctx->ip4_own_query.timer) irq_context: 0 rtnl_mutex (&brmctx->ip6_mc_router_timer) irq_context: 0 rtnl_mutex (&brmctx->ip6_other_query.timer) irq_context: 0 rtnl_mutex (&brmctx->ip6_own_query.timer) irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key &obj_hash[i].lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key krc.lock irq_context: 0 rtnl_mutex &im->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key krc.lock irq_context: 0 rtnl_mutex &tbl->lock pool_lock#2 irq_context: 0 rtnl_mutex &tbl->lock &n->lock irq_context: 0 rtnl_mutex &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 rtnl_mutex &tbl->lock nl_table_lock irq_context: 0 rtnl_mutex &tbl->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &tbl->lock nl_table_wait.lock irq_context: 0 rtnl_mutex &tbl->lock rcu_read_lock lock#8 irq_context: 0 rtnl_mutex &tbl->lock rcu_read_lock id_table_lock irq_context: 0 rtnl_mutex &tbl->lock &dir->lock#2 irq_context: 0 rtnl_mutex &tbl->lock krc.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &sem->wait_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &p->pi_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 nl_table_lock nl_table_wait.lock irq_context: 0 rtnl_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 rtnl_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &root->kernfs_rwsem &____s->seqcount 
irq_context: 0 rtnl_mutex &root->kernfs_rwsem rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &root->kernfs_rwsem rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex batched_entropy_u8.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex kfence_freelist_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu quarantine_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &c->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &n->list_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock 
kernfs_idr_lock &____s->seqcount irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_node_0 irq_context: 0 &iint->mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &iint->mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &iint->mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &iint->mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#10 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex (work_completion)(&br->mcast_gc_work) irq_context: 0 rtnl_mutex rcu_state.barrier_mutex irq_context: 0 rtnl_mutex rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex &x->wait#24 irq_context: 0 rtnl_mutex rcu_state.barrier_mutex &rq->__lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tomoyo_ss &rcu_state.expedited_wq irq_context: 0 rtnl_mutex (work_completion)(&ht->run_work) irq_context: 0 rtnl_mutex &ht->mutex irq_context: 0 rtnl_mutex &ht->mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex &ht->mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 &c->lock irq_context: 0 &root->kernfs_rwsem rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex lock kernfs_idr_lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 rtnl_mutex &br->lock lweventlist_lock &c->lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key &n->list_lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key &n->list_lock &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rcu_read_lock &vma->vm_lock->lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock &vma->vm_lock->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &n->list_lock irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &n->list_lock &c->lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond1#2 irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events 
(work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem pgd_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem stock_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem key irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem pcpu_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem percpu_counters_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem pcpu_lock stock_lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex pool_lock#2 irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock pool_lock#2 irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &n->list_lock &c->lock 
irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock batched_entropy_u8.lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock kfence_freelist_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock rcu_read_lock &n->list_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &meta->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &ep->mtx gdp_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &iint->mutex &obj_hash[i].lock pool_lock irq_context: 0 &ep->mtx rcu_read_lock pool_lock#2 irq_context: 0 &root->kernfs_rwsem &sem->wait_lock irq_context: 0 &root->kernfs_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET elock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex 
fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback rcu_node_0 irq_context: 0 &ep->mtx gdp_mutex &c->lock irq_context: 0 &ep->mtx &root->kernfs_rwsem &sem->wait_lock irq_context: 0 rtnl_mutex uevent_sock_mutex &____s->seqcount#2 irq_context: 0 rtnl_mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock hugetlb_lock irq_context: 0 rtnl_mutex &br->hash_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond1#3 irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 &xt[i].mutex &cfs_rq->removed.lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &n->list_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &sem->wait_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 fs_reclaim &rq->__lock irq_context: 0 &ep->mtx gdp_mutex &sem->wait_lock irq_context: 0 &ep->mtx gdp_mutex &p->pi_lock irq_context: 0 rtnl_mutex gdp_mutex &rq->__lock irq_context: 0 rtnl_mutex gdp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &fsnotify_mark_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &fsnotify_mark_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &disk->open_mutex &lo->lo_mutex &rq->__lock irq_context: 0 &disk->open_mutex &lo->lo_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#4 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond2#2 irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&slave->notify_work)->work) &base->lock 
&obj_hash[i].lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 &pcp->lock &zone->lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 &____s->seqcount irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 &rq->__lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock &n->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock &n->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock nl_table_wait.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock rcu_read_lock lock#8 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock rcu_read_lock id_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock &dir->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock krc.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &bond->mode_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex krc.lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem &table->lock#4 &rq->__lock irq_context: 0 rtnl_mutex remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 rtnl_mutex subsys mutex#17 &rq->__lock irq_context: 0 rtnl_mutex subsys mutex#17 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &n->list_lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &n->list_lock &c->lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &rq->__lock irq_context: 0 rcu_state.barrier_mutex rcu_read_lock &rq->__lock irq_context: 0 rcu_state.barrier_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex uevent_sock_mutex.wait_lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work rcu_read_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond1#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 &p->lock &of->mutex kn->active#5 remove_cache_srcu pool_lock#2 irq_context: 0 &p->lock &of->mutex kn->active#5 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#5 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond3 irq_context: 0 (wq_completion)bond3 &rq->__lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock tcp_metrics_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock tcp_metrics_lock &c->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock tcp_metrics_lock pool_lock#2 irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 vsock_table_lock irq_context: 0 sk_lock-AF_CAIF irq_context: 0 sk_lock-AF_CAIF &rq->__lock irq_context: 0 sk_lock-AF_CAIF &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_CAIF slock-AF_CAIF irq_context: 0 sk_lock-AF_CAIF &obj_hash[i].lock irq_context: 0 slock-AF_CAIF irq_context: 0 remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG vsock_table_lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG vsock_table_lock 
clock-AF_VSOCK irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG &rq->__lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sock_diag_mutex sock_diag_table_mutex rcu_read_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 sk_lock-AF_VSOCK irq_context: 0 sk_lock-AF_VSOCK slock-AF_VSOCK irq_context: 0 slock-AF_VSOCK irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &____s->seqcount#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET clock-AF_INET irq_context: 0 sk_lock-AF_INET &sctp_port_hashtable[i].lock irq_context: 0 sk_lock-AF_INET &sctp_port_hashtable[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET crngs.lock irq_context: 0 sk_lock-AF_INET &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rhashtable_bucket irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET &asoc->wait irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override &c->lock irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock key#23 irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET krc.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 
sk_lock-AF_INET rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &n->list_lock irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET sctp_assocs_id_lock irq_context: 0 sk_lock-AF_INET batched_entropy_u32.lock crngs.lock irq_context: 0 sk_lock-AF_INET &list->lock#25 irq_context: 0 sk_lock-AF_INET krc.lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET krc.lock &base->lock irq_context: 0 sk_lock-AF_INET krc.lock &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &list->lock#25 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 krc.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock rhashtable_bucket irq_context: 0 &sb->s_type->i_mutex_key#10 
sk_lock-AF_INET/1 &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 sctp_assocs_id_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 sctp_assocs_id_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 sctp_assocs_id_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 krc.lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 krc.lock &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 krc.lock &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 krc.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 &sctp_port_hashtable[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 &sctp_port_hashtable[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 &sctp_port_hashtable[i].lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 &sctp_port_hashtable[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 &sctp_port_hashtable[i].lock rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)bond2#3 irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex fs_reclaim &cfs_rq->removed.lock irq_context: 0 rtnl_mutex fs_reclaim &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET &sctp_port_hashtable[i].lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET &sctp_port_hashtable[i].lock &____s->seqcount irq_context: 0 sk_lock-AF_INET &sctp_port_hashtable[i].lock &c->lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &n->list_lock irq_context: 0 (wq_completion)events 
(linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &sbi->s_orphan_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &sbi->s_orphan_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond3#2 irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu pool_lock#2 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock &bridge_netdev_addr_lock_key &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond2#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond2#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 
irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &meta->lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 
(wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 fs_reclaim &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 
sk_lock-AF_INET/1 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &root->kernfs_rwsem &p->pi_lock irq_context: softirq &(&br->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock batched_entropy_u32.lock irq_context: 0 ebt_mutex &rq->__lock irq_context: 0 ebt_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK slock-AF_VSOCK irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK vsock_table_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK clock-AF_VSOCK irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK rlock-AF_VSOCK irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_VSOCK irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_CAIF irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF slock-AF_CAIF irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF &this->info_list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF (console_sem).lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF &ei->socket.wq.wait irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF clock-AF_CAIF irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF elock-AF_CAIF irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_CAIF irq_context: 0 &sb->s_type->i_mutex_key#10 elock-AF_CAIF irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &base->lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &ei->xattr_sem rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &ei->xattr_sem rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &ei->xattr_sem rcu_read_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond3#3 irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_IPGRE irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_IPGRE pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) devices_rwsem rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) devices_rwsem rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) devices_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss batched_entropy_u8.lock irq_context: 0 sb_writers#4 
&type->i_mutex_dir_key#3/1 tomoyo_ss kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &meta->lock irq_context: 0 rtnl_mutex rcu_read_lock &ul->lock irq_context: 0 rtnl_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 rtnl_mutex nl_table_wait.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock quarantine_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (reaper_work).work rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->lock rcu_read_lock &rq->__lock irq_context: 0 &p->lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_IPGRE irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &nf_nat_locks[i] irq_context: 0 (wq_completion)mld 
(work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld 
(work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &zone->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &zone->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &zone->lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 
(wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &zone->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &zone->lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &zone->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &zone->lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh (console_sem).lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh console_lock console_srcu console_owner irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pcpu_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &zone->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &zone->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &zone->lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex rcu_state.barrier_mutex.wait_lock irq_context: softirq rcu_read_lock rcu_read_lock init_task.mems_allowed_seq.seqcount irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem 
&list->lock#2 irq_context: 0 (wq_completion)bond3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
&____s->seqcount#10 irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond3#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &zone->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &zone->lock &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh 
rcu_read_lock &ul->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &rq->__lock &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle bit_wait_table + i irq_context: 0 namespace_sem rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 namespace_sem rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rtnl_mutex stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &base->lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET (&tw->tw_timer) irq_context: 0 sk_lock-AF_INET rcu_read_lock fastopen_seqlock.seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 rtnl_mutex pcpu_alloc_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_hook_mutex per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 rtnl_mutex (inet6addr_validator_chain).rwsem &rq->__lock irq_context: 0 rtnl_mutex (inet6addr_validator_chain).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] fs_reclaim &rq->__lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 key#24 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 quarantine_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &lock->wait_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#4 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#4 &rcu_state.expedited_wq irq_context: 0 &type->i_mutex_dir_key#4 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &type->i_mutex_dir_key#4 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#4 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_IPGRE &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_IPGRE &n->list_lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_IPGRE &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu rcu_node_0 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem devices_rwsem 
rdma_nets_rwsem &device->compat_devs_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &tbl->lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &tbl->lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &tbl->lock batched_entropy_u32.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &tbl->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &zone->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 sb_writers#3 oom_adj_mutex rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#3 oom_adj_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#3 oom_adj_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &n->list_lock &c->lock irq_context: 0 (wq_completion)mld 
(work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#3 oom_adj_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#3 oom_adj_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#3 oom_adj_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#3 oom_adj_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &zone->lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 &hashinfo->ehash_locks[i] irq_context: 0 sk_lock-AF_INET6 rcu_read_lock fastopen_seqlock.seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &ei->socket.wq.wait irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &sem->wait_lock irq_context: 0 sk_lock-AF_INET6 &p->pi_lock irq_context: 0 sk_lock-AF_INET6 &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 &pcp->lock &zone->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET irq_context: 0 rtnl_mutex sk_lock-AF_INET &rq->__lock irq_context: 0 rtnl_mutex sk_lock-AF_INET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex sk_lock-AF_INET slock-AF_INET irq_context: 0 rtnl_mutex sk_lock-AF_INET &mm->mmap_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET fs_reclaim irq_context: 0 rtnl_mutex sk_lock-AF_INET fs_reclaim mmu_notifier_invalidate_range_start 
irq_context: 0 rtnl_mutex sk_lock-AF_INET pool_lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_INET &obj_hash[i].lock irq_context: 0 rtnl_mutex sk_lock-AF_INET &in_dev->mc_tomb_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET &im->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET &base->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex slock-AF_INET irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock &zone->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock &zone->lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock fs_reclaim irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock stock_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock ptlock_ptr(page) irq_context: 0 slock-AF_INET6 tk_core.seq.seqcount irq_context: 0 slock-AF_INET6 pool_lock#2 irq_context: 0 slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 slock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &base->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 sb_writers#14 &rq->__lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 sb_writers#14 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &lock->wait_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &p->pi_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock rtnl_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex rcu_read_lock &im->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem krc.lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fib_info_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fib_info_lock pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock &n->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock &n->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock nl_table_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock nl_table_wait.lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock rcu_read_lock lock#8 
irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock rcu_read_lock id_table_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock &dir->lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock krc.lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem class irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem (&tbl->proxy_timer) irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &base->lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock &c->lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock &n->list_lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock nl_table_lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock nl_table_wait.lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock &data->fib_event_queue_lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &tbl->lock &c->lock irq_context: 0 rtnl_mutex &net->ipv6.addrconf_hash_lock &obj_hash[i].lock irq_context: softirq rcu_callback &c->lock irq_context: 0 rtnl_mutex &dev->tx_global_lock _xmit_LOOPBACK#2 irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &n->list_lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &n->list_lock &c->lock irq_context: 0 rtnl_mutex &br->hash_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 tk_core.seq.seqcount irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 pool_lock#2 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &c->lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq 
(&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 &obj_hash[i].lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 &base->lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex sk_lock-AF_INET krc.lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem rcu_node_0 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem &rnp->exp_wq[0] irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu rcu_read_lock &rq->__lock 
irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem &rnp->exp_wq[2] irq_context: 0 pernet_ops_rwsem &rnp->exp_wq[1] irq_context: 0 pernet_ops_rwsem &x->wait#10 irq_context: 0 pernet_ops_rwsem sysctl_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 pernet_ops_rwsem sysctl_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 
slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC fill_pool_map-wait-type-override &n->list_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_nat_locks[i] irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock (work_completion)(flush) irq_context: 0 
(wq_completion)events_highpri (work_completion)(flush) irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock &x->wait#10 irq_context: 0 (wq_completion)events_highpri (work_completion)(flush) &list->lock#5 irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_highpri (work_completion)(&barr->work) irq_context: 0 (wq_completion)events_highpri (work_completion)(&barr->work) &x->wait#10 irq_context: 0 (wq_completion)events_highpri (work_completion)(&barr->work) &x->wait#10 &p->pi_lock irq_context: 0 (wq_completion)events_highpri (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_highpri (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex device_links_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex 
&pnn->pndevs.lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &pnn->pndevs.lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &meta->lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh kfence_freelist_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &zone->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &zone->lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &base->lock irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET remove_cache_srcu irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET remove_cache_srcu quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET remove_cache_srcu &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET remove_cache_srcu &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET 
remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET remove_cache_srcu &obj_hash[i].lock irq_context: 0 rtnl_mutex raw_notifier_lock irq_context: 0 rtnl_mutex bcm_notifier_lock irq_context: 0 rtnl_mutex isotp_notifier_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex &c->lock irq_context: 0 &u->iolock stock_lock irq_context: softirq &(&bat_priv->tt.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu &rq->__lock irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_callback stock_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rcu_read_lock rcu_read_lock pcpu_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &iint->mutex remove_cache_srcu irq_context: 0 &iint->mutex remove_cache_srcu quarantine_lock irq_context: 0 &iint->mutex remove_cache_srcu &c->lock irq_context: 0 &iint->mutex remove_cache_srcu &n->list_lock irq_context: 0 &iint->mutex remove_cache_srcu &rq->__lock irq_context: 0 &iint->mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &iint->mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 &iint->mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &iint->mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &iint->mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex smc_ib_devices.mutex &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem 
mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#21 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &pnettable->lock &rq->__lock irq_context: 0 rtnl_mutex &pnettable->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &rcu_state.expedited_wq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) fill_pool_map-wait-type-override &c->lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 cgroup_threadgroup_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &u->iolock rcu_read_lock &rcu_state.gp_wq irq_context: 0 rtnl_mutex wq_mayday_lock irq_context: 0 lock pidmap_lock &____s->seqcount#2 irq_context: 0 &u->iolock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &u->iolock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &u->iolock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &cfs_rq->removed.lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &n->list_lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 bit_wait_table + i irq_context: 0 &p->lock &of->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: softirq (&timer) rcu_read_lock &n->list_lock irq_context: softirq (&timer) rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&timer) rcu_read_lock &____s->seqcount#2 irq_context: softirq 
(&timer) &txwq &p->pi_lock &rq->__lock irq_context: softirq (&timer) &txwq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &ht->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &ht->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &ht->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 kn->active#5 &pcp->lock &zone->lock irq_context: 0 rcu_read_lock_bh &nr_netdev_xmit_lock_key irq_context: 0 rcu_read_lock_bh &nr_netdev_xmit_lock_key nr_node_list_lock irq_context: 0 rcu_read_lock_bh &nr_netdev_xmit_lock_key &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh &nr_netdev_xmit_lock_key pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &dev->tx_global_lock &qdisc_xmit_lock_key irq_context: 0 &iint->mutex &rcu_state.expedited_wq irq_context: 0 &iint->mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &iint->mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &iint->mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &bond->stats_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET6 tk_core.seq.seqcount irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_callback &____s->seqcount#2 irq_context: softirq rcu_callback &pcp->lock &zone->lock irq_context: softirq rcu_callback &pcp->lock &zone->lock &____s->seqcount irq_context: softirq rcu_callback &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 batched_entropy_u8.lock irq_context: 0 rtnl_mutex &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &meta->lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: softirq (&icsk->icsk_retransmit_timer) 
slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock pool_lock#2 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock &n->lock &____s->seqcount#9 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock nl_table_lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock nl_table_wait.lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock lock#8 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock id_table_lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock &dir->lock#2 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock krc.lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock &c->lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock &n->list_lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem.wait_lock irq_context: 0 pernet_ops_rwsem devices_rwsem &p->pi_lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem rcu_node_0 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 
(wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_nat_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 fill_pool_map-wait-type-override pool_lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 &c->lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &child->perf_event_mutex rcu_node_0 irq_context: 0 &child->perf_event_mutex &rcu_state.expedited_wq irq_context: 0 &child->perf_event_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &child->perf_event_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &child->perf_event_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &child->perf_event_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &child->perf_event_mutex &cfs_rq->removed.lock irq_context: 0 &child->perf_event_mutex &obj_hash[i].lock irq_context: 0 &child->perf_event_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 pernet_ops_rwsem rtnl_mutex &net->xdp.lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &net->xdp.lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &root->kernfs_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns 
net_cleanup_work pernet_ops_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex rcu_node_0 irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &rq->__lock irq_context: 0 pernet_ops_rwsem kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem gdp_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem gdp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &meta->lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_nat_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-crypt-wg1 
(work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock batched_entropy_u8.lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock kfence_freelist_lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock &meta->lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock kfence_freelist_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) &rq->__lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock &c->lock irq_context: 0 pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock krc.lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock krc.lock &base->lock irq_context: 0 pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &c->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock remove_cache_srcu irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock remove_cache_srcu quarantine_lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock remove_cache_srcu &c->lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock remove_cache_srcu &n->list_lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 
&tsk->futex_exit_mutex &mm->mmap_lock remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &n->list_lock &c->lock irq_context: 0 namespace_sem pcpu_alloc_mutex.wait_lock irq_context: 0 namespace_sem &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[0] &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key &n->list_lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key &n->list_lock &c->lock irq_context: 0 rtnl_mutex mrt_lock irq_context: 0 rtnl_mutex mrt_lock pool_lock#2 irq_context: 0 rtnl_mutex mrt_lock &dir->lock#2 irq_context: 0 rtnl_mutex &root->kernfs_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &dev->tx_global_lock _xmit_TUNNEL#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 &n->list_lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 &n->list_lock &c->lock irq_context: 0 napi_hash_lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex &pnn->routes.lock &rq->__lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &pnn->routes.lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock quarantine_lock irq_context: 0 lock pidmap_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 &xt[i].mutex &mm->mmap_lock &obj_hash[i].lock irq_context: 0 &xt[i].mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &fsnotify_mark_srcu fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &fsnotify_mark_srcu fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock pcpu_lock stock_lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex mrt_lock &c->lock irq_context: 0 rtnl_mutex mrt_lock &n->list_lock irq_context: 0 rtnl_mutex mrt_lock &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock batched_entropy_u8.lock crngs.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem &rnp->exp_wq[3] irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_nat_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu rcu_node_0 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: softirq 
(&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_nat_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem pgd_lock irq_context: 0 pernet_ops_rwsem key irq_context: 0 pernet_ops_rwsem pcpu_lock stock_lock irq_context: 0 tomoyo_ss &rq->__lock &obj_hash[i].lock irq_context: 0 tomoyo_ss &rq->__lock &base->lock irq_context: 0 tomoyo_ss &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 krc.lock &obj_hash[i].lock irq_context: 0 krc.lock &base->lock irq_context: 0 krc.lock &base->lock &obj_hash[i].lock irq_context: 0 nfnl_subsys_ipset fs_reclaim irq_context: 0 nfnl_subsys_ipset fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 nfnl_subsys_ipset &c->lock irq_context: 0 nfnl_subsys_ipset pool_lock#2 irq_context: 0 nfnl_subsys_ipset stock_lock irq_context: 0 nfnl_subsys_ipset crngs.lock irq_context: 0 nfnl_subsys_ipset rcu_state.barrier_mutex irq_context: 0 nfnl_subsys_ipset rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 nfnl_subsys_ipset rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 nfnl_subsys_ipset rcu_state.barrier_mutex &x->wait#24 irq_context: 0 nfnl_subsys_ipset rcu_state.barrier_mutex &rq->__lock irq_context: 0 nfnl_subsys_ipset rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &rq->__lock &cfs_rq->removed.lock irq_context: 0 nfnl_subsys_ipset ip_set_ref_lock irq_context: 0 nfnl_subsys_ipset &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key quarantine_lock irq_context: 0 &xt[i].mutex rcu_read_lock &pool->lock irq_context: 0 &xt[i].mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &xt[i].mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &xt[i].mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &xt[i].mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &xa->xa_lock#7 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &mm->mmap_lock &xa->xa_lock#7 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 
&sb->s_type->i_mutex_key#10 sk_lock-AF_ROSE &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) crngs.lock irq_context: 0 rtnl_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 rtnl_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rtnl_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &xa->xa_lock#7 &n->list_lock &c->lock irq_context: 0 sb_writers#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &wb->work_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &wb->work_lock &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &wb->work_lock &base->lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &wb->work_lock &base->lock &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 sb_internal mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &ht->mutex &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem &ht->mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock 
rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &pcp->lock &zone->lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)events free_ipc_work rcu_node_0 irq_context: 0 (wq_completion)events free_ipc_work &rcu_state.expedited_wq irq_context: 0 (wq_completion)events free_ipc_work &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &fsnotify_mark_srcu stock_lock irq_context: 0 &fsnotify_mark_srcu pcpu_lock stock_lock irq_context: 0 kn->active#5 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 kn->active#5 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 lock irq_context: 0 sk_lock-AF_INET6 lock sctp_assocs_id_lock irq_context: 0 sk_lock-AF_INET6 lock sctp_assocs_id_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 l2tp_ip6_lock irq_context: 0 &mm->mmap_lock &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &rq->__lock irq_context: 0 &____s->seqcount#11 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_INET6 &ping_table.lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &nf_nat_locks[i] irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock &base->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock 
&n->lock &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &ndev->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock stock_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &ul->lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 &sb->s_type->i_mutex_key#10 &ping_table.lock irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock rcu_node_0 irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock &rq->__lock irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 sb_writers#8 kn->active#5 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 kn->active#5 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 lock sctp_assocs_id_lock &c->lock 
irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &pcp->lock &zone->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &n->list_lock &c->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex irq_context: 0 &net->xfrm.xfrm_cfg_mutex &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex fs_reclaim irq_context: 0 &net->xfrm.xfrm_cfg_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &net->xfrm.xfrm_cfg_mutex pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &c->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &n->list_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &n->list_lock &c->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock rcu_node_0 irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex rlock-AF_NETLINK irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq (&net->sctp.addr_wq_timer) &net->sctp.addr_wq_lock &base->lock irq_context: softirq (&net->sctp.addr_wq_timer) &net->sctp.addr_wq_lock &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override &c->lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &group->mark_mutex rcu_read_lock rcu_node_0 irq_context: 0 &group->mark_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu pool_lock#2 irq_context: 0 &root->kernfs_rwsem &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 &obj_hash[i].lock pool_lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &root->kernfs_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &meta->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh kfence_freelist_lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh pool_lock#2 irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback &meta->lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback kfence_freelist_lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_nat_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex &obj_hash[i].lock pool_lock irq_context: 0 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#4 &mm->mmap_lock mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh 
rcu_read_lock rlock-AF_PACKET irq_context: 0 vlan_ioctl_mutex rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem key#22 irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &xa->xa_lock#7 key#10 irq_context: 0 sk_lock-AF_ALG (console_sem).lock irq_context: 0 sk_lock-AF_ALG console_lock console_srcu console_owner_lock irq_context: 0 sk_lock-AF_ALG console_lock console_srcu console_owner irq_context: 0 sk_lock-AF_ALG console_lock console_srcu console_owner &port_lock_key irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG console_lock console_srcu console_owner console_owner_lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] rcu_read_lock &rcu_state.expedited_wq irq_context: 0 vlan_ioctl_mutex rtnl_mutex.wait_lock irq_context: 0 vlan_ioctl_mutex &p->pi_lock irq_context: 0 vlan_ioctl_mutex &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &meta->lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &folio_wait_table[i] irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_ALG remove_cache_srcu irq_context: 0 sk_lock-AF_ALG remove_cache_srcu quarantine_lock irq_context: 0 sk_lock-AF_ALG remove_cache_srcu &c->lock irq_context: 0 sk_lock-AF_ALG remove_cache_srcu &n->list_lock irq_context: 0 sk_lock-AF_ALG remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_ALG remove_cache_srcu &obj_hash[i].lock irq_context: 0 sk_lock-AF_ALG remove_cache_srcu &rq->__lock irq_context: 0 sk_lock-AF_ALG remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG remove_cache_srcu pool_lock#2 irq_context: 0 sk_lock-AF_ALG remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_ALG remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 key#22 irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#8 
&cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock &c->lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults remove_cache_srcu irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults remove_cache_srcu quarantine_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults remove_cache_srcu &c->lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults remove_cache_srcu &n->list_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults remove_cache_srcu &obj_hash[i].lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults remove_cache_srcu pool_lock#2 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults remove_cache_srcu &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex (console_sem).lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex console_lock console_srcu console_owner_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex console_lock console_srcu console_owner irq_context: 0 &net->xfrm.xfrm_cfg_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 &net->xfrm.xfrm_cfg_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock irq_context: 0 ppp_mutex &n->list_lock &c->lock irq_context: 0 ppp_mutex rtnl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 ppp_mutex 
rtnl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex rtnl_mutex &sem->wait_lock irq_context: 0 ppp_mutex rtnl_mutex &p->pi_lock irq_context: 0 ppp_mutex rtnl_mutex &p->pi_lock &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex &n->list_lock irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 &xt[i].mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 &xt[i].mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &xt[i].mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &xt[i].mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 ppp_mutex free_vmap_area_lock irq_context: 0 ppp_mutex free_vmap_area_lock &obj_hash[i].lock irq_context: 0 ppp_mutex free_vmap_area_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &____s->seqcount#2 irq_context: 0 ppp_mutex vmap_area_lock irq_context: 0 ppp_mutex pcpu_alloc_mutex irq_context: 0 ppp_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 ppp_mutex &obj_hash[i].lock irq_context: 0 ppp_mutex rcu_read_lock &pool->lock irq_context: 0 ppp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 ppp_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 ppp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 ppp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 ppp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 crypto_alg_sem &rq->__lock irq_context: 0 ppp_mutex rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex batched_entropy_u8.lock irq_context: 0 cb_lock genl_mutex batched_entropy_u8.lock crngs.lock irq_context: 0 cb_lock genl_mutex kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem key#15 irq_context: 0 pernet_ops_rwsem rtnl_mutex nl_table_wait.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &wq->mutex &rq->__lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &wq->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex cpu_hotplug_lock &rq->__lock irq_context: softirq (&n->timer) &dir->lock irq_context: softirq (&n->timer) stock_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &____s->seqcount#2 
irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &____s->seqcount irq_context: 0 pernet_ops_rwsem wq_pool_mutex &wq->mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &lock->wait_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &mm->mmap_lock mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_state.barrier_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &xt[i].mutex purge_vmap_area_lock quarantine_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_node_0 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &cfs_rq->removed.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex 
&cfs_rq->removed.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex rtnl_mutex rcu_node_0 irq_context: 0 ppp_mutex rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 ppp_mutex purge_vmap_area_lock irq_context: 0 ppp_mutex purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 ppp_mutex purge_vmap_area_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle bit_wait_table + i irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 quarantine_lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &c->lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex &cfs_rq->removed.lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex pool_lock#2 irq_context: 0 &xt[i].mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &xt[i].mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &c->lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rcu_read_lock krc.lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock &rnp->exp_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rcu_state.exp_mutex irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rcu_state.exp_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 cb_lock genl_mutex hwsim_phys_lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock krc.lock irq_context: 0 vlan_ioctl_mutex.wait_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock &wq->mutex irq_context: 0 cb_lock genl_mutex hwsim_phys_lock &wq->mutex &pool->lock/1 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock &wq->mutex &x->wait#10 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 netpoll_srcu irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 net_rwsem irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 net_rwsem &list->lock#2 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &pn->hash_lock irq_context: 0 cb_lock genl_mutex 
hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &tn->lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &dev->tx_global_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &dev->tx_global_lock _xmit_NETROM irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &dev->tx_global_lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 rcu_state.exp_mutex rcu_node_0 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 rcu_state.exp_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 dev->qdisc_tx_busylock ?: &qdisc_tx_busylock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &sch->q.lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &sch->q.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex net_rwsem irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &wq->mutex irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &wq->mutex &pool->lock/1 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &wq->mutex &x->wait#10 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 fs_reclaim irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 pool_lock#2 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &c->lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 nl_table_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 rlock-AF_NETLINK irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 rcu_read_lock &ei->socket.wq.wait irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 rcu_read_lock 
&ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 nl_table_wait.lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 __ip_vs_mutex irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 __ip_vs_mutex &ipvs->dest_trash_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 netlbl_unlhsh_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &rdev->dev_wait irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &im->lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 krc.lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &tbl->lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 class irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 (&tbl->proxy_timer) irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &base->lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 flowtable_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &dir->lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 rcu_read_lock &pool->lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex 
&local->iflist_mtx#2 rcu_read_lock &tb->tb6_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 nr_list_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 nr_neigh_list_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &net->ipv6.fib6_gc_lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 dev_base_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 cpu_hotplug_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 cpu_hotplug_lock &list->lock#5 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &dir->lock#2 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 bpf_devs_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 sk_lock-AF_RDS irq_context: 0 sk_lock-AF_RDS &rq->__lock irq_context: 0 sk_lock-AF_RDS &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &in_dev->mc_tomb_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 sysctl_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 sysctl_lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 sysctl_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 sysctl_lock krc.lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &ul->lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &net->xdp.lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 mirred_list_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &nft_net->commit_mutex irq_context: 0 sk_lock-AF_RDS slock-AF_RDS irq_context: 0 slock-AF_RDS irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &pnn->pndevs.lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &pnn->routes.lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &pnettable->lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 smc_ib_devices.mutex irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 proto_tab_lock raw_sk_list.lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 proc_subdir_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 
&ent->pde_unload_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 proc_inum_ida.xa_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &net->ipv6.addrconf_hash_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &ndev->lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &ndev->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &idev->mc_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &idev->mc_query_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &idev->mc_query_lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 (work_completion)(&(&idev->mc_report_work)->work) irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &idev->mc_report_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &idev->mc_lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &idev->mc_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &idev->mc_lock krc.lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 sysctl_lock &obj_hash[i].lock pool_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 devices_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &____s->seqcount irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 target_list_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &dev_addr_list_lock_key#5 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &root->kernfs_rwsem irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &root->kernfs_rwsem &rq->__lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &root->kernfs_rwsem pool_lock#2 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 uevent_sock_mutex irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 uevent_sock_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 uevent_sock_mutex &c->lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 uevent_sock_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock 
rtnl_mutex &local->iflist_mtx#2 uevent_sock_mutex nl_table_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 uevent_sock_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 uevent_sock_mutex nl_table_wait.lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 sysfs_symlink_target_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &k->list_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &root->kernfs_rwsem irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 kernfs_idr_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 dev_hotplug_mutex irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 dev_hotplug_mutex &dev->power.lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 dev_pm_qos_sysfs_mtx irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 dev_pm_qos_sysfs_mtx &rq->__lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 dev_pm_qos_sysfs_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 subsys mutex#17 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 subsys mutex#17 &k->k_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 subsys mutex#17 &k->k_lock klist_remove_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock 
rtnl_mutex &local->iflist_mtx#2 &root->kernfs_rwsem rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &x->wait#9 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 dpm_list_mtx irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &dev->power.lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 deferred_probe_mutex irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 device_links_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 gdp_mutex irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 cpu_hotplug_lock xps_map_mutex irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 cpu_hotplug_lock xps_map_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 cpu_hotplug_lock xps_map_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &n->list_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 _xmit_IEEE802154 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 net_rwsem &rq->__lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 uevent_sock_mutex &____s->seqcount irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 uevent_sock_mutex rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 uevent_sock_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &sem->wait_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &p->pi_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &root->kernfs_rwsem &sem->wait_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 dev_hotplug_mutex &k->k_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &k->k_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 &k->k_lock klist_remove_lock irq_context: 0 cb_lock 
genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 gdp_mutex sysfs_symlink_target_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 gdp_mutex &root->kernfs_rwsem irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 gdp_mutex &sem->wait_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 gdp_mutex &p->pi_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 gdp_mutex &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 gdp_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 gdp_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 gdp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 gdp_mutex &k->list_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 gdp_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &local->iflist_mtx#2 gdp_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex.wait_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock &p->pi_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex hwsim_phys_lock &rq->__lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rcu_state.barrier_mutex irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rcu_state.barrier_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 sk_lock-AF_NETLINK irq_context: 0 &pipe->mutex/1 sk_lock-AF_NETLINK slock-AF_NETLINK irq_context: 0 &pipe->mutex/1 sk_lock-AF_NETLINK rcu_read_lock rhashtable_bucket irq_context: 0 &pipe->mutex/1 slock-AF_NETLINK irq_context: 0 &pipe->mutex/1 free_vmap_area_lock irq_context: 0 &pipe->mutex/1 vmap_area_lock irq_context: 0 &pipe->mutex/1 init_mm.page_table_lock irq_context: 0 &pipe->mutex/1 rcu_read_lock &c->lock irq_context: 0 &pipe->mutex/1 rcu_read_lock &zone->lock irq_context: 0 &pipe->mutex/1 rcu_read_lock &____s->seqcount irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_read_lock_bh &zone->lock irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_read_lock_bh &zone->lock &____s->seqcount irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 &pipe->mutex/1 
purge_vmap_area_lock irq_context: 0 &pipe->mutex/1 fs_reclaim &rq->__lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rcu_state.barrier_mutex &x->wait#24 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rcu_state.barrier_mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rcu_state.barrier_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex hwsim_phys_lock dev_base_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock lweventlist_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock &dir->lock#2 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock &dir->lock#2 &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock &dir->lock#2 pool_lock#2 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock netdev_unregistering_wq.lock irq_context: 0 rtnl_mutex &ipvlan->addrs_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock wq_mayday_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock &x->wait irq_context: 0 cb_lock genl_mutex hwsim_phys_lock wq_pool_mutex irq_context: 0 cb_lock genl_mutex hwsim_phys_lock wq_pool_mutex &wq->mutex irq_context: 0 cb_lock genl_mutex hwsim_phys_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock &pool->lock/1 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock &pool->lock/1 rcu_read_lock &pool->lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock &pool->lock/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock &pool->lock/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->alloc_lock &x->wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#3/2 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#3/2 &dev_addr_list_lock_key/1 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#3/2 &dev_addr_list_lock_key/1 rcu_read_lock _xmit_ETHER irq_context: 0 vlan_ioctl_mutex vlan_ioctl_mutex.wait_lock irq_context: 0 vlan_ioctl_mutex &rq->__lock irq_context: 0 vlan_ioctl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &ipvlan->addrs_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &ipvlan->addrs_lock pool_lock#2 irq_context: 0 rtnl_mutex &ipvlan->addrs_lock krc.lock irq_context: 0 rtnl_mutex &dev->tx_global_lock &qdisc_xmit_lock_key#2 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#3/2 &obj_hash[i].lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#3/2 pool_lock#2 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#3/2 krc.lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock &____s->seqcount irq_context: 0 rtnl_mutex &net->ipv6.addrconf_hash_lock &base->lock irq_context: 0 rtnl_mutex &net->ipv6.addrconf_hash_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3/2 irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3/2 &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3/2 pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3/2 krc.lock irq_context: 0 rtnl_mutex (work_completion)(&port->wq) irq_context: 0 vlan_ioctl_mutex &p->pi_lock &rq->__lock 
&cfs_rq->removed.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tn->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &dir->lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex krc.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex fs_reclaim irq_context: 0 vlan_ioctl_mutex rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 vlan_ioctl_mutex rtnl_mutex remove_cache_srcu irq_context: 0 vlan_ioctl_mutex rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex nl_table_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex nl_table_wait.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex netpoll_srcu irq_context: 0 vlan_ioctl_mutex rtnl_mutex &pn->hash_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &dev->tx_global_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &dev->tx_global_lock &vlan_netdev_xmit_lock_key irq_context: 0 vlan_ioctl_mutex rtnl_mutex &dev->tx_global_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex &sch->q.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &vlan_netdev_addr_lock_key/1 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &vlan_netdev_addr_lock_key/1 _xmit_ETHER irq_context: 0 vlan_ioctl_mutex rtnl_mutex &vlan_netdev_addr_lock_key/1 _xmit_ETHER &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex 
&vlan_netdev_addr_lock_key/1 _xmit_ETHER pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &vlan_netdev_addr_lock_key/1 _xmit_ETHER krc.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_ETHER irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_ETHER &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_ETHER pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_ETHER krc.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex lweventlist_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex lweventlist_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex lweventlist_lock &dir->lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &base->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &n->list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &n->list_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex __ip_vs_mutex irq_context: 0 vlan_ioctl_mutex rtnl_mutex __ip_vs_mutex &ipvs->dest_trash_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &vlan_netdev_addr_lock_key/1 &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &vlan_netdev_addr_lock_key/1 pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &vlan_netdev_addr_lock_key/1 krc.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &im->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex fib_info_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex fib_info_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex fib_info_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex class irq_context: 0 vlan_ioctl_mutex rtnl_mutex (&tbl->proxy_timer) irq_context: 0 vlan_ioctl_mutex rtnl_mutex flowtable_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &dir->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rt6_exception_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock nl_table_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock nl_table_wait.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock &data->fib_event_queue_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 
vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock &obj_hash[i].lock pool_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock &n->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock &n->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock nl_table_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock nl_table_wait.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock rcu_read_lock lock#8 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock rcu_read_lock id_table_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock &dir->lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock krc.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &net->ipv6.addrconf_hash_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &net->ipv6.addrconf_hash_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &ndev->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &ndev->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &ndev->lock &base->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &ndev->lock &base->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &ifa->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 krc.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock rcu_read_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &base->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &base->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tb->tb6_lock 
irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &dir->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock krc.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_query_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_query_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (work_completion)(&(&idev->mc_report_work)->work) irq_context: 0 vlan_ioctl_mutex rtnl_mutex &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &net->ipv6.fib6_gc_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex dev_base_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex cpu_hotplug_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex cpu_hotplug_lock &list->lock#5 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 vlan_ioctl_mutex rtnl_mutex bpf_devs_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &hwstats->hwsdev_list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex batched_entropy_u8.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex kfence_freelist_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock 
irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex &in_dev->mc_tomb_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &data->fib_event_queue_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem fs_reclaim irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem nl_table_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem nl_table_wait.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem fib_info_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &tbl->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem class irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem (&tbl->proxy_timer) irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &base->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem 
krc.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex sysctl_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex sysctl_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex sysctl_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex sysctl_lock krc.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &ul->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &net->xdp.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex mirred_list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &nft_net->commit_mutex irq_context: 0 vlan_ioctl_mutex rtnl_mutex proc_subdir_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &ent->pde_unload_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock krc.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_report_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &pnn->pndevs.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &pnn->routes.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &pnettable->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex smc_ib_devices.mutex irq_context: 0 vlan_ioctl_mutex rtnl_mutex target_list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex sysfs_symlink_target_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex kernfs_idr_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &k->list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &____s->seqcount irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem irq_context: 0 vlan_ioctl_mutex rtnl_mutex dev_hotplug_mutex irq_context: 0 vlan_ioctl_mutex rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex dev_pm_qos_sysfs_mtx irq_context: 0 vlan_ioctl_mutex rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 vlan_ioctl_mutex rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 vlan_ioctl_mutex rtnl_mutex dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 vlan_ioctl_mutex rtnl_mutex subsys mutex#17 irq_context: 0 vlan_ioctl_mutex rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex subsys mutex#17 &k->k_lock klist_remove_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &x->wait#9 irq_context: 0 vlan_ioctl_mutex rtnl_mutex dpm_list_mtx irq_context: 0 vlan_ioctl_mutex rtnl_mutex &dev->power.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex deferred_probe_mutex irq_context: 0 vlan_ioctl_mutex rtnl_mutex device_links_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex mmu_notifier_invalidate_range_start irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 vlan_ioctl_mutex rtnl_mutex gdp_mutex irq_context: 0 vlan_ioctl_mutex rtnl_mutex cpu_hotplug_lock xps_map_mutex irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock 
rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 vlan_ioctl_mutex rcu_state.barrier_mutex irq_context: 0 vlan_ioctl_mutex rcu_state.barrier_mutex rcu_state.barrier_mutex.wait_lock irq_context: 0 vlan_ioctl_mutex rcu_state.barrier_mutex &rq->__lock irq_context: 0 vlan_ioctl_mutex rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &root->kernfs_rwsem irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &root->kernfs_rwsem irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex dev_pm_qos_sysfs_mtx irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex kernfs_idr_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &k->k_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &k->k_lock klist_remove_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &k->list_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex sysfs_symlink_target_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex subsys mutex#55 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex subsys mutex#55 &k->k_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex subsys mutex#55 &k->k_lock klist_remove_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &x->wait#9 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex dpm_list_mtx irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex &dev->power.lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex deferred_probe_mutex irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex device_links_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex 
mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex uevent_sock_mutex irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex hwsim_phys_lock rtnl_mutex gdp_mutex irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex krc.lock irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 vlan_ioctl_mutex rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 vlan_ioctl_mutex rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rcu_state.barrier_mutex &x->wait#24 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &cfs_rq->removed.lock irq_context: 0 vlan_ioctl_mutex dev_base_lock irq_context: 0 vlan_ioctl_mutex lweventlist_lock irq_context: 0 vlan_ioctl_mutex lweventlist_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex lweventlist_lock &dir->lock#2 irq_context: 0 vlan_ioctl_mutex pcpu_lock irq_context: 0 vlan_ioctl_mutex pool_lock#2 irq_context: 0 vlan_ioctl_mutex &dir->lock#2 irq_context: 0 vlan_ioctl_mutex &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex krc.lock irq_context: 0 vlan_ioctl_mutex &dir->lock#2 &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex &dir->lock#2 pool_lock#2 irq_context: 0 vlan_ioctl_mutex netdev_unregistering_wq.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock &tbl->lock irq_context: softirq rcu_read_lock rcu_read_lock &n->lock &(&n->ha_lock)->lock irq_context: softirq rcu_read_lock rcu_read_lock &n->lock &(&n->ha_lock)->lock &____s->seqcount#9 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock lock#8 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock id_table_lock irq_context: softirq rcu_read_lock rcu_read_lock &n->lock irq_context: softirq rcu_read_lock rcu_read_lock &n->lock &____s->seqcount#9 irq_context: softirq rcu_read_lock rcu_read_lock nl_table_lock irq_context: softirq rcu_read_lock rcu_read_lock nl_table_wait.lock irq_context: softirq rcu_read_lock rcu_read_lock &ul->lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &base->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &(&n->ha_lock)->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &(&n->ha_lock)->lock &____s->seqcount#9 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock lock#8 
irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock id_table_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &____s->seqcount#9 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock nl_table_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock nl_table_wait.lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &hashinfo->ehash_locks[i] irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 (&req->rsk_timer) irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &base->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &queue->rskq_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock tcp_metrics_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock tcp_metrics_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock tcp_metrics_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 k-clock-AF_INET6 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 k-clock-AF_INET6 rcu_read_lock &pool->lock/1 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 k-clock-AF_INET6 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 k-clock-AF_INET6 rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 k-clock-AF_INET6 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 k-clock-AF_INET6 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 k-clock-AF_INET6 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) fs_reclaim irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &sb->s_type->i_lock_key#8 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 &queue->rskq_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-slock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 clock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 &base->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 &dir->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 fs_reclaim irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 &c->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) once_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) once_lock crngs.lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 
(wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &____s->seqcount#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &____s->seqcount irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &c->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &n->list_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &n->list_lock &c->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_cong_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem fs_reclaim irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem &c->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem crngs.lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem &id_priv->handler_mutex irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem &id_priv->handler_mutex &id_priv->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem id_table_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem &x->wait#29 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock &c->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock rds_tcp_conn_lock irq_context: 0 (wq_completion)krdsd 
(work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock rds_conn_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock rds_conn_lock rds_cong_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock clock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock clock-AF_INET6 rds_tcp_tc_list_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock rcu_read_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock rcu_read_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock rcu_read_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock &cp->cp_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock &c->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &xa->xa_lock#7 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &fsnotify_mark_srcu irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-slock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 fs_reclaim irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 &____s->seqcount irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 &rq->__lock irq_context: 0 
(wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 tk_core.seq.seqcount irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 &base->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: softirq rcu_read_lock rcu_read_lock 
rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 &cp->cp_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 &c->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) &cp->cp_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 &rm->m_rs_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) 
k-sk_lock-AF_INET6 clock-AF_INET6 &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_recv_w)->work) irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_recv_w)->work) k-sk_lock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_recv_w)->work) k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_recv_w)->work) k-sk_lock-AF_INET6 tk_core.seq.seqcount irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_recv_w)->work) k-slock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 &____s->seqcount#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 &list->lock#27 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 &cp->cp_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 irq_context: softirq 
rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 &sd->defer_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 &base->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock &ei->socket.wq.wait irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 fill_pool_map-wait-type-override &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 fill_pool_map-wait-type-override &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &fsnotify_mark_srcu &____s->seqcount#2 irq_context: 0 &fsnotify_mark_srcu &____s->seqcount irq_context: softirq (&icsk->icsk_retransmit_timer) k-slock-AF_INET6 irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#3/2 &c->lock irq_context: 0 sb_writers#8 tomoyo_ss remove_cache_srcu irq_context: 0 sb_writers#8 tomoyo_ss remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 tomoyo_ss remove_cache_srcu &c->lock irq_context: 0 sb_writers#8 tomoyo_ss remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#8 tomoyo_ss remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#8 tomoyo_ss remove_cache_srcu pool_lock#2 irq_context: 0 &pipe->mutex/1 &pipe->wr_wait &p->pi_lock irq_context: 0 &pipe->mutex/1 &pipe->wr_wait &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &pipe->wr_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 rcu_read_lock &sighand->siglock irq_context: 0 &pipe->mutex/1 &sighand->siglock irq_context: 0 &pipe->mutex/1 &sighand->siglock stock_lock irq_context: 0 &pipe->mutex/1 &sighand->siglock &____s->seqcount irq_context: 0 &pipe->mutex/1 &sighand->siglock pool_lock#2 
irq_context: 0 &pipe->mutex/1 &sighand->siglock rcu_read_lock pool_lock#2 irq_context: 0 &pipe->mutex/1 &sighand->siglock &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 &sighand->siglock &p->pi_lock irq_context: 0 &pipe->mutex/1 &sighand->siglock &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &sighand->siglock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tasklist_lock &sighand->siglock stock_lock irq_context: 0 tasklist_lock &sighand->siglock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 raw_sk_list.lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3 &c->lock irq_context: 0 vlan_ioctl_mutex &cfs_rq->removed.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_ETHER (console_sem).lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_ETHER console_lock console_srcu console_owner_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_ETHER console_lock console_srcu console_owner irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_ETHER console_lock console_srcu console_owner &port_lock_key irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_ETHER console_lock console_srcu console_owner console_owner_lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock &sem->wait_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &____s->seqcount#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &n->list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &n->list_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &rnp->exp_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex pgd_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex stock_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex key irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex pcpu_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex percpu_counters_lock irq_context: 0 
vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex pcpu_lock stock_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 &sighand->siglock &n->list_lock irq_context: 0 &sighand->siglock &n->list_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &ul->lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &ul->lock#2 pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &ul->lock#2 &dir->lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &sem->wait_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &group->notification_waitq &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex kernfs_idr_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex kernfs_idr_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &app->lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex (&app->join_timer)#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex (&app->periodic_timer) irq_context: 0 vlan_ioctl_mutex rtnl_mutex &list->lock#15 irq_context: 0 vlan_ioctl_mutex rtnl_mutex (&app->join_timer) irq_context: 0 vlan_ioctl_mutex rtnl_mutex &app->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &list->lock#14 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 &hashinfo->ehash_locks[i] irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 &base->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 
&sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &cp->cp_cm_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 clock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 clock-AF_INET6 rds_tcp_tc_list_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-slock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 k-slock-AF_INET6 &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 k-slock-AF_INET6 elock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &dir->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &sb->s_type->i_lock_key#8 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &xa->xa_lock#7 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &fsnotify_mark_srcu irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &list->lock#27 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &cp->cp_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) (work_completion)(&(&cp->cp_conn_w)->work) irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_node_0 irq_context: 0 proto_tab_lock &n->list_lock irq_context: 0 proto_tab_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)gid-cache-wq &rq->__lock irq_context: 0 (wq_completion)gid-cache-wq &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &c->lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 vlan_ioctl_mutex rcu_state.barrier_mutex rcu_read_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rcu_state.barrier_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock fs_reclaim &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 key#25 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock 
&rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock krc.lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock krc.lock &base->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &n->list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &n->list_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_node_0 irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem fib_info_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem fib_info_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock krc.lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock krc.lock &base->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &____s->seqcount#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 vlan_ioctl_mutex &meta->lock irq_context: 0 vlan_ioctl_mutex kfence_freelist_lock irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock fs_reclaim irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock &____s->seqcount irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock stock_lock irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu &c->lock irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu &n->list_lock irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu pool_lock#2 irq_context: 0 &mm->mmap_lock quarantine_lock irq_context: 0 (wq_completion)bond11 irq_context: 0 (wq_completion)bond11 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond11 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &n->list_lock &c->lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock &bridge_netdev_addr_lock_key &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock &bridge_netdev_addr_lock_key &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) &c->lock irq_context: 0 link_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 &cfs_rq->removed.lock irq_context: 0 
rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key &n->list_lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key &n->list_lock &c->lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond11 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond11 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond11 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 uts_sem &rq->__lock irq_context: 0 uts_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim &rq->__lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex dev_hotplug_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&bat_priv->dat.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 sb_writers#4 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)bond4 irq_context: 0 (wq_completion)bond4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex fs_reclaim &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sb_writers#4 
&type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)bond5 irq_context: 0 (wq_completion)bond5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 &____s->seqcount#2 irq_context: 0 (wq_completion)bond13 irq_context: 0 (wq_completion)bond13 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond13 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 cb_lock genl_mutex &rcu_state.expedited_wq irq_context: 0 cb_lock genl_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 cb_lock genl_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock &n->list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock &n->list_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_node_0 irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &obj_hash[i].lock pool_lock irq_context: softirq (&ndev->rs_timer) &ndev->lock fill_pool_map-wait-type-override &c->lock irq_context: softirq (&ndev->rs_timer) &ndev->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq 
(&ndev->rs_timer) &ndev->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work rtnl_mutex &pool->lock irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work rtnl_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work rtnl_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work rtnl_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex sysctl_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond13 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond13 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock &bridge_netdev_addr_lock_key &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock &bridge_netdev_addr_lock_key &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key &____s->seqcount#2 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key &____s->seqcount irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work rtnl_mutex.wait_lock irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work &p->pi_lock irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond5#2 irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 
(wq_completion)bond5#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock rcu_read_lock stock_lock irq_context: 0 &mm->mmap_lock rcu_read_lock pcpu_lock stock_lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond14 irq_context: 0 (wq_completion)bond14 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond14 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond6 irq_context: 0 (wq_completion)bond6 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond6 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_read_lock 
&rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &pcp->lock &zone->lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 &n->list_lock &c->lock irq_context: 0 (wq_completion)bond3#4 irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 kn->active#5 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond6#2 irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond6#2 
(work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->queue_stop_reason_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_node_0 irq_context: 0 rtnl_mutex &p->alloc_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sta->ampdu_mlme.mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sta->ampdu_mlme.mtx &sta->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx (work_completion)(&sta->ampdu_mlme.work) irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock rhashtable_bucket irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sta->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx krc.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->key_mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx fs_reclaim irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &fq->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &c->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &n->list_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &n->list_lock &c->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx nl_table_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx nl_table_wait.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx pin_fs_lock 
irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &dentry->d_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 pin_fs_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 mount_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 mount_lock mount_lock.seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &fsnotify_mark_srcu irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &xa->xa_lock#7 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock &dentry->d_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &fsnotify_mark_srcu irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_lock_key#7 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &s->s_inode_list_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &xa->xa_lock#7 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock mount_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx mount_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx mount_lock mount_lock.seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->active_txq_lock[i] irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx (work_completion)(&sta->drv_deliver_wk) irq_context: 0 
rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx (work_completion)(&sta->drv_deliver_wk) &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx (work_completion)(&sta->drv_deliver_wk) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->chanctx_mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &ifibss->incomplete_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx lweventlist_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx lweventlist_lock pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx lweventlist_lock &dir->lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx krc.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx hrtimer_bases.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &data->mutex irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &local->queue_stop_reason_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &fq->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &local->queue_stop_reason_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx krc.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &list->lock#18 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx (&ifibss->timer) irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &base->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &base->lock 
&obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock krc.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &____s->seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &list->lock#2 irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) rtnl_mutex irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) rtnl_mutex rcu_node_0 irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) rtnl_mutex &rcu_state.expedited_wq irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) rtnl_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) rtnl_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) rtnl_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex _xmit_ETHER &obj_hash[i].lock irq_context: 0 rtnl_mutex _xmit_ETHER krc.lock irq_context: 0 rcu_state.barrier_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond15 irq_context: 0 (wq_completion)bond15 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond15 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) 
&rdev->wiphy.mtx &wdev->mtx fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 &tsk->futex_exit_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->mtx irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->sta_mtx irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx _xmit_ETHER irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx _xmit_ETHER &local->filter_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx _xmit_ETHER &local->filter_lock &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx _xmit_ETHER &local->filter_lock pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx _xmit_ETHER &local->filter_lock krc.lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (&local->dynamic_ps_timer) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (work_completion)(&local->dynamic_ps_enable_work) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (work_completion)(&local->dynamic_ps_enable_work) &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (work_completion)(&local->dynamic_ps_enable_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (work_completion)(&sdata->recalc_smps) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (work_completion)(&link->csa_finalize_work) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (work_completion)(&link->color_change_finalize_work) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (work_completion)(&(&link->dfs_cac_timer_work)->work) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 
cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&n->timer) rcu_read_lock lock#8 irq_context: softirq (&n->timer) &n->lock &____s->seqcount#9 irq_context: 0 &xt[i].mutex batched_entropy_u8.lock crngs.lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &list->lock#18 irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &cfs_rq->removed.lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->filter_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &xa->xa_lock#3 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 stock_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &fq->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &local->queue_stop_reason_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond7 irq_context: 0 (wq_completion)bond7 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond7 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond7 
(work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond15 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond15 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond15 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond4#2 irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 lock#4 &obj_hash[i].lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) &p->pi_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond3#4 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &mm->mmap_lock &anon_vma->rwsem pgd_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem stock_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem key irq_context: 0 &mm->mmap_lock &anon_vma->rwsem pcpu_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem percpu_counters_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem pcpu_lock stock_lock irq_context: 0 (wq_completion)bond5 &rq->__lock irq_context: 0 (wq_completion)bond5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu pool_lock#2 irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu &rq->__lock irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 &iint->mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond15 &rq->__lock irq_context: 0 (wq_completion)bond15 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond15 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &iint->mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond16 irq_context: 0 (wq_completion)bond16 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond16 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &rq->__lock &obj_hash[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &rq->__lock &base->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 kn->active#5 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#8 kn->active#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#8 kn->active#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond8 irq_context: 0 (wq_completion)bond8 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond8 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &tsk->futex_exit_mutex &rq->__lock irq_context: 0 &sig->cred_guard_mutex &tsk->futex_exit_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock rcu_read_lock lock#8 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_power_efficient 
(work_completion)(&(&tbl->gc_work)->work) &tbl->lock quarantine_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) rcu_node_0 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock quarantine_lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem 
&list->lock#2 irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond7 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock krc.lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock krc.lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock krc.lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock krc.lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock krc.lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock krc.lock 
&base->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock kfence_freelist_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &meta->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &____s->seqcount#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &____s->seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->chanctx_mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rdev->wiphy_work_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex &lock->wait_lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx 
&ifibss->incomplete_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &list->lock#18 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx (&ifibss->timer) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &base->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &base->lock &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->key_mtx irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock rcu_node_0 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &p->pi_lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &p->pi_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock rcu_read_lock 
&rq->__lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock &base->lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem &table->lock#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key &____s->seqcount#2 irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key &____s->seqcount irq_context: 0 (wq_completion)bond17 irq_context: 0 (wq_completion)bond17 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond17 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 &resv_map->lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 &obj_hash[i].lock irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 remove_cache_srcu &base->lock irq_context: 0 remove_cache_srcu &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem 
remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond3#5 irq_context: 0 (wq_completion)bond3#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond3#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond3#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond3#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond3#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond3#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events_freezable (work_completion)(&vb->update_balloon_stats_work) &rq->__lock irq_context: 0 (wq_completion)events_freezable (work_completion)(&vb->update_balloon_stats_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)bond9 irq_context: 0 (wq_completion)bond9 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond9 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &iint->mutex batched_entropy_u8.lock irq_context: 0 &iint->mutex kfence_freelist_lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock &base->lock irq_context: 0 
(wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock &base->lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &ei->xattr_sem &rq->__lock irq_context: 0 &sig->cred_guard_mutex &ei->xattr_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &ht->mutex &rq->__lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex uevent_sock_mutex uevent_sock_mutex.wait_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &____s->seqcount#2 irq_context: 0 (wq_completion)bond18 irq_context: 0 (wq_completion)bond18 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond18 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &____s->seqcount irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key krc.lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex fs_reclaim 
mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 isotp_notifier_lock irq_context: 0 sk_lock-AF_CAN irq_context: 0 sk_lock-AF_CAN slock-AF_CAN irq_context: 0 sk_lock-AF_CAN &mm->mmap_lock irq_context: 0 slock-AF_CAN irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu &rq->__lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET free_vmap_area_lock irq_context: 0 sk_lock-AF_INET vmap_area_lock irq_context: 0 sk_lock-AF_INET pcpu_alloc_mutex irq_context: 0 sk_lock-AF_INET pcpu_alloc_mutex pcpu_lock irq_context: 0 sk_lock-AF_INET pack_mutex irq_context: 0 sk_lock-AF_INET text_mutex irq_context: 0 sk_lock-AF_INET text_mutex ptlock_ptr(page)#2 irq_context: 0 sk_lock-AF_INET &fp->aux->used_maps_mutex irq_context: 0 sk_lock-AF_INET &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET tk_core.seq.seqcount irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET &obj_hash[i].lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET &base->lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET &mm->mmap_lock stock_lock irq_context: 0 (wq_completion)bond4#3 irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 (wq_completion)bond5#3 irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond18 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond10 irq_context: 0 (wq_completion)bond10 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond10 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET batched_entropy_u32.lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 sk_lock-AF_INET slock-AF_INET &sk->sk_lock.wq irq_context: 0 sk_lock-AF_INET &mm->mmap_lock ptlock_ptr(page)#2 lock#4 &lruvec->lru_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 slock-AF_INET &sk->sk_lock.wq irq_context: 0 slock-AF_INET &sk->sk_lock.wq &p->pi_lock irq_context: 0 slock-AF_INET &sk->sk_lock.wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 slock-AF_INET &sk->sk_lock.wq &p->pi_lock &rq->__lock irq_context: 0 slock-AF_INET &sk->sk_lock.wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET &mm->mmap_lock ptlock_ptr(page)#2 key irq_context: 0 sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 sk_lock-AF_INET &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET &mm->mmap_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond19 irq_context: 0 (wq_completion)bond19 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond19 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET &sb->s_type->i_lock_key#8 irq_context: 0 sk_lock-AF_INET &dir->lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET/1 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET/1 k-slock-AF_INET irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET/1 pool_lock#2 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET/1 &dir->lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET/1 fs_reclaim irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET k-slock-AF_INET irq_context: 0 sk_lock-AF_INET k-clock-AF_INET irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &____s->seqcount#8 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET batched_entropy_u32.lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock k-clock-AF_INET irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &h->lhash2[i].lock irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key &meta->lock irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key kfence_freelist_lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 
(wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG remove_cache_srcu irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG remove_cache_srcu quarantine_lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG remove_cache_srcu &c->lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG remove_cache_srcu &n->list_lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG remove_cache_srcu &obj_hash[i].lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex rcu_read_lock &h->lhash2[i].lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex rcu_read_lock &rq->__lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex rcu_read_lock rcu_node_0 irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 k-slock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 &queue->rskq_lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG rcu_read_lock pool_lock#2 irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG rlock-AF_NETLINK irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 &h->lhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 &tcp_hashinfo.bhash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 &tcp_hashinfo.bhash[i].lock 
&tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-slock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &sb->s_type->i_lock_key#8 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &xa->xa_lock#7 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &fsnotify_mark_srcu irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 k-clock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 k-slock-AF_INET &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 k-slock-AF_INET pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 k-slock-AF_INET krc.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 k-slock-AF_INET elock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &msk->pm.lock irq_context: 0 (wq_completion)bond11#2 irq_context: 0 (wq_completion)bond11#2 &rq->__lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &tcp_hashinfo.bhash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET 
&tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 isotp_notifier_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex pool_lock#2 irq_context: 0 (wq_completion)bond6#3 irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal batched_entropy_u8.lock irq_context: 0 sb_writers#4 sb_internal kfence_freelist_lock irq_context: 0 sb_writers#4 sb_internal &meta->lock irq_context: 0 ebt_mutex &mm->mmap_lock &rq->__lock irq_context: 0 ebt_mutex &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET/1 &c->lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#21 remove_cache_srcu irq_context: 0 &sb->s_type->i_mutex_key#21 remove_cache_srcu quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#21 remove_cache_srcu &c->lock irq_context: 0 &sb->s_type->i_mutex_key#21 remove_cache_srcu &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#21 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#21 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#21 remove_cache_srcu pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#21 remove_cache_srcu &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#21 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#21 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#21 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key &____s->seqcount#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond5 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond8 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &sighand->siglock irq_context: 0 sk_lock-AF_INET &sighand->siglock irq_context: 0 sk_lock-AF_INET &sighand->siglock stock_lock irq_context: 0 sk_lock-AF_INET &sighand->siglock pool_lock#2 irq_context: 0 sk_lock-AF_INET &sighand->siglock &p->pi_lock irq_context: 0 sk_lock-AF_INET &sighand->siglock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET &sighand->siglock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#9 &rq->__lock irq_context: 0 sk_lock-AF_CAN &rq->__lock irq_context: 0 sk_lock-AF_INET &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond20 irq_context: 0 (wq_completion)bond20 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond20 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond20 
(work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &meta->lock irq_context: 0 &sb->s_type->i_mutex_key#10 kfence_freelist_lock irq_context: 0 sk_lock-AF_INET &sighand->siglock &c->lock irq_context: 0 sk_lock-AF_INET &sighand->siglock &n->list_lock irq_context: 0 sk_lock-AF_INET &sighand->siglock &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 k-slock-AF_INET &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond12 irq_context: 0 (wq_completion)bond12 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond12 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 
(wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond12 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 
(wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond16 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond10 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 fill_pool_map-wait-type-override batched_entropy_u8.lock irq_context: 0 fill_pool_map-wait-type-override kfence_freelist_lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond20 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond20 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond20 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond14 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond14 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond3#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond3#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond6#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 
(wq_completion)bond4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond17 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 
(wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond19 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond19 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond19 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond3#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond3#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond3#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond3#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond3#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond3#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 &iint->mutex remove_cache_srcu pool_lock#2 irq_context: 0 &mm->mmap_lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond3#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond3#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond3#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond3#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond3#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond3#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond3#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond3#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond3#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond3#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond3#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond3#5 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond3#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond3#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond3#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond3#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond3#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)bond21 irq_context: 0 (wq_completion)bond21 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond21 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &kernfs_locks->open_file_mutex[count] fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &kernfs_locks->open_file_mutex[count] fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: softirq (&peer->timer_new_handshake) irq_context: softirq (&peer->timer_new_handshake) &peer->endpoint_lock irq_context: softirq (&peer->timer_new_handshake) &peer->endpoint_lock &obj_hash[i].lock irq_context: softirq (&peer->timer_new_handshake) &peer->endpoint_lock pool_lock#2 irq_context: softirq (&peer->timer_new_handshake) rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq (&peer->timer_new_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: softirq (&peer->timer_new_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq (&peer->timer_new_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: softirq (&peer->timer_new_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq (&peer->timer_new_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 
&p->pi_lock &rq->__lock irq_context: softirq (&peer->timer_new_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)bond13#2 irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tvlv.container_list_lock &____s->seqcount irq_context: 0 (wq_completion)bond6#4 irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 
(wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] rcu_node_0 irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &____s->seqcount#2 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 &p->lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex wq_pool_mutex &wq->mutex &rq->__lock irq_context: 0 rtnl_mutex wq_pool_mutex &wq->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock &p->pi_lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] remove_cache_srcu irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] remove_cache_srcu quarantine_lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] remove_cache_srcu &c->lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] remove_cache_srcu &n->list_lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] remove_cache_srcu &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] remove_cache_srcu &rq->__lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)bond22 irq_context: 0 (wq_completion)bond22 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond22 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex 
fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond15 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock 
irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond16 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond16 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond7 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond7 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 rtnl_mutex &net->ipv6.fib6_gc_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex &net->ipv6.fib6_gc_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond7#2 irq_context: 0 (wq_completion)bond7#2 &rq->__lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond21 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond21 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_node_0 irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
&dir->lock#2 irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond22 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond22 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &br->hash_lock nl_table_wait.lock &p->pi_lock irq_context: 0 rtnl_mutex &br->hash_lock nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &br->hash_lock nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond14#2 irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond14#2 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#21 &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#21 &____s->seqcount irq_context: 0 (wq_completion)bond23 irq_context: 0 (wq_completion)bond23 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond23 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &mm->mmap_lock &anon_vma->rwsem fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem fill_pool_map-wait-type-override &c->lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &____s->seqcount#2 irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) rcu_node_0 irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) &rq->__lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond8#2 irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#2 
(work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock batched_entropy_u8.lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock kfence_freelist_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &meta->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex remove_cache_srcu irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex remove_cache_srcu quarantine_lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex remove_cache_srcu &c->lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex remove_cache_srcu &n->list_lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex remove_cache_srcu pool_lock#2 irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex remove_cache_srcu &rq->__lock irq_context: 0 loop_validate_mutex &lo->lo_mutex &rq->__lock irq_context: 0 loop_validate_mutex &lo->lo_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &fsnotify_mark_srcu &n->list_lock irq_context: 0 &fsnotify_mark_srcu &n->list_lock &c->lock irq_context: 0 sk_lock-AF_PACKET &po->bind_lock rcu_read_lock &c->lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex rcu_node_0 irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#3 irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 rtnl_mutex batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond8#3 
(work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 &____s->seqcount irq_context: 0 sk_lock-AF_PACKET &rnp->exp_lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 sk_lock-AF_PACKET &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &rnp->exp_lock irq_context: 0 &p->lock &of->mutex kn->active#5 rcu_read_lock rcu_node_0 irq_context: 0 &p->lock &of->mutex kn->active#5 rcu_read_lock &rq->__lock irq_context: 0 &p->lock &of->mutex kn->active#5 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &obj_hash[i].lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond9#2 irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 
0 (wq_completion)bond8#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond16#2 irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond14#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond13#2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond23 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond23 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond9 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond9 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond17 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond17 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond3#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond3#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond22 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PACKET &po->bind_lock rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_PACKET &po->bind_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 &fsnotify_mark_srcu batched_entropy_u8.lock irq_context: 0 &fsnotify_mark_srcu kfence_freelist_lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond10 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond10 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond10 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &obj_hash[i].lock irq_context: 0 sb_writers#8 kn->active#5 fs_reclaim &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 kn->active#5 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 rtnl_mutex &root->kernfs_rwsem rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#5 
&type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex cpu_hotplug_lock &rq->__lock irq_context: 0 rtnl_mutex cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex uevent_sock_mutex &rcu_state.expedited_wq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start 
irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex bpf_devs_lock rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex bpf_devs_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq irq_context: 0 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &group->mark_mutex rcu_read_lock 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond18 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond18 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond18 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 cb_lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond18 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond6#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 
(wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#58 fs_reclaim irq_context: 0 kn->active#58 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#58 stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&slave->notify_work)->work) 
rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond10 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond10 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond12 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond12 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
&ndev->lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond7#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond15 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond15 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond8 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond8 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond23 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 sb_writers#5 &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 
(netlink_chain).rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#58 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#58 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#58 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &iint->mutex fs_reclaim &rq->__lock irq_context: 0 &iint->mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->lock &of->mutex kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex key#26 irq_context: 0 &p->lock &of->mutex kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 lock pidmap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &meta->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &journal->j_state_lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&({ do { 
const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 kn->active#58 &c->lock irq_context: 0 kn->active#58 &____s->seqcount#2 irq_context: 0 kn->active#58 &____s->seqcount irq_context: 0 kn->active#58 &n->list_lock irq_context: 0 kn->active#58 &n->list_lock &c->lock irq_context: 0 kn->active#58 &rq->__lock irq_context: 0 kn->active#58 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &rq->__lock &cfs_rq->removed.lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 tasklist_lock rcu_read_lock &p->pi_lock irq_context: 0 tasklist_lock rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 tasklist_lock rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 batched_entropy_u8.lock irq_context: 0 sb_writers#8 kfence_freelist_lock irq_context: 0 sb_writers#8 &meta->lock irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tomoyo_ss batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &n->list_lock &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_node_0 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 cb_lock genl_mutex fs_reclaim &rq->__lock irq_context: 0 cb_lock genl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex uevent_sock_mutex &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle 
rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#3 fs_reclaim &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &dentry->d_lock &p->pi_lock irq_context: 0 rcu_read_lock &dentry->d_lock &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock &dentry->d_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex wq_pool_mutex &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock fs_reclaim &rq->__lock irq_context: 0 cb_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &iint->mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 &p->lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)rcu_gp rcu_node_0 irq_context: 0 (wq_completion)rcu_gp &rcu_state.expedited_wq irq_context: 0 (wq_completion)rcu_gp &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)rcu_gp &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex &obj_hash[i].lock pool_lock irq_context: 0 &group->mark_mutex fs_reclaim &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu quarantine_lock irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 &group->mark_mutex &cfs_rq->removed.lock irq_context: 0 &group->mark_mutex &obj_hash[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &c->lock irq_context: 0 (wq_completion)wg-kex-wg0 
(work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond13#3 irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fs_reclaim &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 sb_writers#8 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#8 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond14#3 irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond20#2 irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#2 
(work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock &cfs_rq->removed.lock irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex quarantine_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond15#2 irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 tomoyo_ss &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) 
&peer->endpoint_lock rcu_read_lock_bh &c->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#7 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond14#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 
(wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond16#3 irq_context: 0 (wq_completion)bond16#3 
(work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock &c->lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &cfs_rq->removed.lock irq_context: 0 &group->mark_mutex &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override kfence_freelist_lock irq_context: 0 ebt_mutex ebt_mutex.wait_lock irq_context: 0 ebt_mutex.wait_lock irq_context: 0 &group->mark_mutex lock &group->inotify_data.idr_lock &c->lock irq_context: 0 sb_writers#8 remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 sb_writers#8 remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond18#2 irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &pcp->lock &zone->lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond21#2 irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex net_rwsem rcu_node_0 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex net_rwsem &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex net_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex net_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex net_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 rtnl_mutex uevent_sock_mutex batched_entropy_u8.lock irq_context: 0 rtnl_mutex uevent_sock_mutex kfence_freelist_lock irq_context: 0 rtnl_mutex uevent_sock_mutex &meta->lock irq_context: 0 (wq_completion)bond19#2 irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 
(wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &n->list_lock irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 sb_writers#3 &mm->mmap_lock &rq->__lock irq_context: 0 sb_writers#3 &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond16#4 irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond20#3 irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 
(wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &pcp->lock &zone->lock irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem &rq->__lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond17#2 irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rcu_node_0 irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond17#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 
irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond21#3 irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#3 irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#3 
(work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#2 irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &group->mark_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &group->mark_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &group->mark_mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond16#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond21#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
nl_table_lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond20#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)events_unbound connector_reaper_work fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_lock_key &xa->xa_lock#7 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &mm->mmap_lock stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)bond19#3 irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#2 irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&app->join_timer)#2 &app->lock#2 batched_entropy_u32.lock crngs.lock base_crng.lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 
(wq_completion)bond19#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) 
&p->pi_lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond18#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond19#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond13#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond24 irq_context: 0 (wq_completion)bond24 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond24 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond24 
(work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#3 irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond24#2 irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 
(wq_completion)bond24#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &p->pi_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq irq_context: 0 rtnl_mutex &root->kernfs_rwsem &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond25 irq_context: 0 rtnl_mutex &pool->lock/1 rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex &pool->lock/1 rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex &pool->lock/1 rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 rtnl_mutex &pool->lock/1 rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex &pool->lock/1 rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond25 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &br->hash_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex &br->hash_lock 
fill_pool_map-wait-type-override pool_lock irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key rcu_node_0 irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond26 irq_context: 0 (wq_completion)bond26 &rq->__lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond26 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 rtnl_mutex (switchdev_blocking_notif_chain).rwsem &rq->__lock irq_context: 0 rtnl_mutex (switchdev_blocking_notif_chain).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (debug_obj_work).work &rq->__lock irq_context: 0 (wq_completion)events (debug_obj_work).work &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex netpoll_srcu &rq->__lock irq_context: 0 rtnl_mutex netpoll_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 nf_sockopt_mutex nf_sockopt_mutex.wait_lock irq_context: 0 nf_sockopt_mutex.wait_lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock &c->lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 
(wq_completion)bond24 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond24 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond26 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond26 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond25 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond25 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond27 irq_context: 0 (wq_completion)bond27 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond27 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 
(wq_completion)bond23#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond27 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond27 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
&____s->seqcount#10 irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond18#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)bond28 irq_context: 0 (wq_completion)bond28 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond28 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &iint->mutex stock_lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex pcpu_lock stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_threadgroup_rwsem rcu_node_0 irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu pool_lock#2 irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#7 &pl->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock 
&xa->xa_lock#7 &pl->lock key#12 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sb_writers#8 &____s->seqcount#2 irq_context: 0 sb_writers#8 &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss batched_entropy_u8.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss kfence_freelist_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &meta->lock irq_context: 0 (wq_completion)bond26#2 irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29 irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &xt[i].mutex &rcu_state.expedited_wq irq_context: 0 &xt[i].mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond29 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond29 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond29 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond28 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond28 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
fs_reclaim irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond26#2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond30 irq_context: 0 (wq_completion)bond30 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond30 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &rq->__lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 sb_writers &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 fs_reclaim &rq->__lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond31 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond31 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&c->lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock 
irq_context: 0 &sb->s_type->i_mutex_key#21 rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#21 rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#21 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#21 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#21 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#21 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#21 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 
(wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond30 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem 
&list->lock#2 irq_context: 0 (wq_completion)bond30 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond32 irq_context: 0 &mm->mmap_lock remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond32 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond32 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq rcu_callback rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq drivers/net/wireguard/ratelimiter.c:20 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &vma->vm_lock->lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &vma->vm_lock->lock &obj_hash[i].lock irq_context: 0 sb_writers#3 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond33 irq_context: 0 (wq_completion)bond33 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond33 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem quarantine_lock irq_context: 0 &type->i_mutex_dir_key#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &type->i_mutex_dir_key#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &rq->__lock irq_context: 0 dup_mmap_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond33 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond33 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond34 irq_context: 0 (wq_completion)bond34 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond34 (work_completion)(&(&slave->notify_work)->work) 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u8.lock irq_context: 0 &root->kernfs_iattr_rwsem &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &meta->lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh kfence_freelist_lock irq_context: 0 &root->kernfs_iattr_rwsem &obj_hash[i].lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond34 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond34 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 
(wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond32 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond32 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 &xt[i].mutex rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond35 irq_context: 0 (wq_completion)bond35 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond35 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond35 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond35 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond35 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond35 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond35 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond35 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &group->mark_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond35 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond35 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start 
irq_context: 0 (wq_completion)bond35 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond35 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond35 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond35 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond35 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond35 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond35 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond35 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond35 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond35 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond35 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond35 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond35 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond35 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond35 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond35 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond35 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond31 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond31 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond36 irq_context: 0 (wq_completion)bond36 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond36 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock 
irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond36 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond36 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock batched_entropy_u8.lock crngs.lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &p->lock remove_cache_srcu rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &p->lock remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond37 irq_context: 0 (wq_completion)bond37 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond37 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond37 
(work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond37 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 &type->i_mutex_dir_key#3 rcu_node_0 irq_context: 0 (wq_completion)bond37 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond37 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &xt[i].mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex pcpu_alloc_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock fs_reclaim &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock fs_reclaim &obj_hash[i].lock irq_context: 0 (wq_completion)bond33#2 irq_context: 0 (wq_completion)bond33#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond33#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond33#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond33#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond38 irq_context: 0 (wq_completion)bond38 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 rcu_read_lock rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond38 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex 
fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 rcu_node_0 irq_context: 0 (wq_completion)bond34#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond34#2 irq_context: 0 (wq_completion)bond34#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond34#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond34#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond34#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond34#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond34#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond34#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond34#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond33#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond33#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond33#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond33#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond33#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond38 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond38 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 &p->lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &p->lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond39 irq_context: 0 (wq_completion)bond39 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond39 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond39 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond39 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond39 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond39 
(work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond39 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond39 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond35#2 irq_context: 0 (wq_completion)bond35#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond35#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond35#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond35#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond35#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond35#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond35#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond35#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond35#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond39 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond39 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond39 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond39 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond39 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond39 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond39 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond39 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond39 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond39 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond39 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond39 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond39 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond39 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond39 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond39 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond39 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond39 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond39 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond39 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond39 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond39 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond39 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond39 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond39 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond39 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond39 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond39 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond39 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond39 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rq->__lock irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond40 irq_context: 0 (wq_completion)bond40 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond40 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond40 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond40 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond40 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond40 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond40 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond40 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond36#2 irq_context: 0 (wq_completion)bond36#2 
(work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond36#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond36#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond36#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond40 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond40 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: softirq (&ifibss->timer) irq_context: softirq (&ifibss->timer) &rdev->wiphy_work_lock irq_context: softirq (&ifibss->timer) rcu_read_lock &pool->lock irq_context: softirq (&ifibss->timer) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&ifibss->timer) rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq (&ifibss->timer) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&ifibss->timer) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex net_rwsem &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond40 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond36#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond36#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond36#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond36#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond36#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond40 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond40 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond40 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond40 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond41 irq_context: 0 (wq_completion)bond41 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond41 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond41 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond41 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond41 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond41 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond41 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond41 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond41 
(work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond41 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex pcpu_lock stock_lock irq_context: 0 (wq_completion)bond37#2 irq_context: 0 (wq_completion)bond37#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond37#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond37#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond37#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond41 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond41 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond41 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond37#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond37#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond37#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond40 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond40 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond40 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond40 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond40 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond40 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond40 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond40 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond40 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond40 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond40 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond40 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond40 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond40 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond40 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond40 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 
(wq_completion)bond40 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond40 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond40 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond40 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond40 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond40 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond40 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond41 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond41 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond41 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond41 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond41 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond41 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond41 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond41 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond41 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond41 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond41 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond41 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond41 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond41 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond41 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond41 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond41 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond41 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 
(wq_completion)bond41 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond41 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond41 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond41 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &bgl->locks[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) &hwstats->hwsdev_list_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) &hwstats->hwsdev_list_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle key#4 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) remove_cache_srcu irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) remove_cache_srcu &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#2 irq_context: 0 (wq_completion)bond38#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond38#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond38#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#4 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond39#2 irq_context: 0 (wq_completion)bond39#2 &rq->__lock irq_context: 0 (wq_completion)bond39#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond42 irq_context: 0 (wq_completion)bond42 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond42 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 
(wq_completion)bond42 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond42 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond42 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond42 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond42 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond42 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#2 irq_context: 0 (wq_completion)bond40#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond40#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond40#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond43 irq_context: 0 (wq_completion)bond43 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond43 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond43 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond43 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 loop_validate_mutex &rq->__lock irq_context: 0 loop_validate_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond43 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond43 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond43 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond43 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond42 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond42 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond42 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond42 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond42 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond42 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond42 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond42 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond42 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond42 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond42 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond42 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond42 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond42 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond42 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond42 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond42 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond42 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond42 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond42 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond42 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond42 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond42 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond42 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond43 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond43 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond43 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond43 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond43 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond38#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond38#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond38#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond38#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond38#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond40#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond40#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond40#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond40#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond40#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond39#2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond42 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond42 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond42 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond42 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond42 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond43 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond43 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond43 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond43 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond43 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond43 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond43 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond43 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond43 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond43 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond43 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond43 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond43 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond43 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond43 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond41#2 irq_context: 0 (wq_completion)bond41#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond41#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond41#2 (work_completion)(&(&slave->notify_work)->work) 
&base->lock irq_context: 0 (wq_completion)bond41#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex smc_ib_devices.mutex &rq->__lock irq_context: 0 rtnl_mutex smc_ib_devices.mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond44 irq_context: 0 (wq_completion)bond44 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond44 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond44 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond44 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond44 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond44 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond44 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond44 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &____s->seqcount irq_context: 0 sb_writers#4 sb_internal jbd2_handle bit_wait_table + i irq_context: 0 &ret->b_state_lock rcu_read_lock pool_lock#2 irq_context: 0 &ret->b_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss batched_entropy_u8.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss kfence_freelist_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &meta->lock irq_context: 0 (wq_completion)bond44 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu &c->lock irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu &n->list_lock irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#2 irq_context: 0 (wq_completion)bond42#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond42#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond42#2 
(work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond44 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond44 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond44 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond44 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond44 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond45 irq_context: 0 (wq_completion)bond45 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond45 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond45 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond45 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond45 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond45 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond45 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond45 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond45 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond45 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond45 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond45 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond45 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond45 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond45 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond45 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond45 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond45 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond44 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond44 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond44 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 
0 (wq_completion)bond44 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond44 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond44 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond44 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond44 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond44 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond44 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond44 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond44 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond44 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond44 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond44 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond44 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond44 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond44 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond44 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond44 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond44 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond44 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond44 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond44 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond44 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond44 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond44 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond42#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond42#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 
irq_context: 0 (wq_completion)bond42#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond42#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond42#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond41#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond41#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond41#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond41#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond41#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond45 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond45 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond45 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond45 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond45 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 remove_cache_srcu rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock kfence_freelist_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 lock#4 &pcp->lock &zone->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 lock#4 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond43#2 irq_context: 0 (wq_completion)bond43#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond43#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond43#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem 
remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)bond43#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond43#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond43#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond43#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond43#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock kfence_freelist_lock irq_context: 0 sb_writers#8 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#8 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock &meta->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond46 irq_context: 0 (wq_completion)bond46 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond46 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond46 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond46 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: 0 (wq_completion)bond46 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond46 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond46 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond46 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#2 irq_context: 0 (wq_completion)bond44#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond44#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond44#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond44#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] 
batched_entropy_u8.lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond46 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond46 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond46 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond46 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond46 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond47 irq_context: 0 (wq_completion)bond47 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond47 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#9 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond47 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond47 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond47 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond47 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_node_0 irq_context: 0 (wq_completion)bond48 irq_context: 0 (wq_completion)bond48 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond48 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond48 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond48 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond48 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond48 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond48 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond48 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 
&type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &lock->wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &lock->wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond49 irq_context: 0 (wq_completion)bond49 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond49 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond49 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond49 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond49 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond49 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond49 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond49 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &ssp->srcu_sup->srcu_cb_mutex &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &ssp->srcu_sup->srcu_cb_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 (wq_completion)bond49 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond49 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond49 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond49 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond49 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock 
&ndev->lock irq_context: 0 (wq_completion)bond49 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond49 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond49 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond49 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond49 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond49 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond49 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond49 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond49 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond49 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond49 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond49 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond49 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond49 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond49 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond49 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond49 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond49 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond48 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond48 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond48 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond48 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond48 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond48 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond48 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond48 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond48 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond48 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond48 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond48 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond48 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond48 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond48 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond48 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond48 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond48 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond48 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond48 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond48 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond48 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond48 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond48 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond44#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond44#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond46 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond46 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond46 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond46 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond46 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond46 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond46 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond46 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond46 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond46 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond46 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond46 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond46 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond46 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond46 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond46 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond46 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond46 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond46 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond46 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond46 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond46 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond46 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond46 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond46 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond46 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond49 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond49 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond48 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond48 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond48 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 
&mm->mmap_lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 kn->active#5 rcu_node_0 irq_context: 0 (wq_completion)bond50 irq_context: 0 (wq_completion)bond50 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond50 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_node_0 irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &rcu_state.expedited_wq irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond10#2 irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex net_rwsem rcu_node_0 irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)bond51 irq_context: 0 (wq_completion)bond51 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond51 (work_completion)(&(&slave->notify_work)->work) 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond51 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond51 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond51 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond51 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond51 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond51 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond51 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond51 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex batched_entropy_u8.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_state_lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 
(wq_completion)wg-crypt-wg1#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &journal->j_state_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &rcu_state.expedited_wq irq_context: 0 rtnl_mutex &hwstats->hwsdev_list_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &hwstats->hwsdev_list_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 nl_table_wait.lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond10#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 &group->mark_mutex &fsnotify_mark_srcu &rq->__lock irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock 
tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET slock-AF_INET &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET slock-AF_INET &base->lock irq_context: 0 sk_lock-AF_INET slock-AF_INET &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock &c->lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu quarantine_lock irq_context: 0 &root->kernfs_rwsem rcu_node_0 irq_context: 0 &root->kernfs_rwsem &rcu_state.expedited_wq irq_context: 0 &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond53 irq_context: 0 (wq_completion)bond53 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond53 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu &c->lock irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &obj_hash[i].lock irq_context: 0 (wq_completion)bond11#3 irq_context: 0 (wq_completion)bond11#3 
(work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 kn->active#5 &pcp->lock &zone->lock irq_context: 0 sb_writers#8 kn->active#5 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond50 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond50 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
fs_reclaim irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond54 irq_context: 0 (wq_completion)bond54 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond54 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond54 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond54 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond51 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &obj_hash[i].lock pool_lock irq_context: 0 
sb_writers#5 &sb->s_type->i_mutex_key#12 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond27#2 irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond12#2 irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond51 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond51 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond51 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond51 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond51 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond51 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond51 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond51 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond51 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond51 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond51 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond51 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond51 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond51 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond51 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond51 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond51 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond51 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond51 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond51 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond51 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond51 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond51 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond51 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond51 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond51 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond51 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond51 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond51 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
fs_reclaim irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond54 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond53 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond53 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock 
fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond55 irq_context: 0 (wq_completion)bond55 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond55 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_node_0 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond28#2 irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 
(wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond56 irq_context: 0 (wq_completion)bond56 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond56 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
&dir->lock#2 irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_lock_key &xa->xa_lock#7 &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_lock_key &xa->xa_lock#7 pool_lock#2 
irq_context: 0 rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond56 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond29#2 irq_context: 0 (wq_completion)bond29#2 &rq->__lock irq_context: 0 (wq_completion)bond29#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond57 irq_context: 0 (wq_completion)bond57 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond57 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 rtnl_mutex wq_pool_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond57 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &xt[i].mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &xt[i].mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &xt[i].mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &xt[i].mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond57 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond57 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond57 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond55 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond29#2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond56 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond56 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 &mm->mmap_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond55 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond55 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond30#2 irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock rcu_read_lock 
batched_entropy_u8.lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond58 irq_context: 0 (wq_completion)bond58 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond58 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond13#4 irq_context: 0 (wq_completion)bond13#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond13#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond13#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond13#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond13#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#3 irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond31#2 irq_context: 0 (wq_completion)bond31#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond31#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond31#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond31#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) &pcp->lock &zone->lock &____s->seqcount irq_context: softirq (&tw->tw_timer) irq_context: softirq (&tw->tw_timer) &hashinfo->ehash_locks[i] irq_context: softirq (&tw->tw_timer) &tcp_hashinfo.bhash[i].lock irq_context: softirq (&tw->tw_timer) &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: softirq (&tw->tw_timer) &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: softirq (&tw->tw_timer) &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: softirq (&tw->tw_timer) &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &____s->seqcount irq_context: softirq (&tw->tw_timer) &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: softirq (&tw->tw_timer) &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock rcu_read_lock pool_lock#2 irq_context: softirq (&tw->tw_timer) &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 fs_reclaim &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond59 irq_context: 0 (wq_completion)bond59 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond59 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock 
&sb->s_type->i_mutex_key#21 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock quarantine_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond14#4 irq_context: 0 (wq_completion)bond14#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond14#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond14#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) kfence_freelist_lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &meta->lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh kfence_freelist_lock irq_context: 0 (wq_completion)bond14#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond14#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond14#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond32#2 irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond32#2 
(work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &anon_vma->rwsem &rq->__lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &anon_vma->rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond30#3 irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond60 irq_context: 0 (wq_completion)bond60 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond60 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 tomoyo_ss remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#8 tomoyo_ss remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond33#3 irq_context: 0 (wq_completion)bond33#3 
(work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12/4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond31#3 irq_context: 0 (wq_completion)bond31#3 &rq->__lock irq_context: 0 (wq_completion)bond31#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond31#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond31#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond31#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond31#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond61 irq_context: 0 (wq_completion)bond61 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond61 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond34#3 irq_context: 0 (wq_completion)bond34#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond34#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond34#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond34#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond32#3 irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#3 irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond15#3 
(work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem pgd_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem stock_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem key irq_context: 0 rtnl_mutex &root->kernfs_rwsem pcpu_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem percpu_counters_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem pcpu_lock stock_lock irq_context: 0 (wq_completion)bond62 irq_context: 0 (wq_completion)bond62 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond62 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond35#3 irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start 
irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 
&sb->s_type->i_mutex_key#9 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond63 irq_context: 0 (wq_completion)bond63 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond63 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond36#3 irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_node_0 irq_context: 0 (wq_completion)bond33#4 irq_context: 0 (wq_completion)bond33#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond33#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond33#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond33#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond33#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond33#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond33#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond33#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond63 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond63 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond63 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&slave->notify_work)->work) 
rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond60 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond61 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond61 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond13#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond13#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond13#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond13#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond13#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond58 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond64 irq_context: 0 (wq_completion)bond64 &rq->__lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) 
&rq->__lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond16#5 irq_context: 0 (wq_completion)bond16#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond16#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond16#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#3 irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond59 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond59 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond59 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond30#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond33#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond33#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond33#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond33#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond33#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond33#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond33#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond33#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond33#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond33#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond33#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond33#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond33#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond33#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond33#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond33#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond33#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond33#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex 
net_rwsem irq_context: 0 (wq_completion)bond33#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond33#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond33#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond33#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond14#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond14#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond14#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond14#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond14#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond14#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond14#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond14#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond14#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond14#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond14#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond14#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond14#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond14#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond14#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond14#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond14#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond14#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond14#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond14#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond14#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond14#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond14#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond14#4 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond14#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond14#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond14#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond14#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond14#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 
(wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 rtnl_mutex dpm_list_mtx &rq->__lock irq_context: 0 rtnl_mutex dpm_list_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond32#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond31#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond31#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond31#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond31#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond31#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond16#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond16#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond16#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond16#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond16#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond13#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond13#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond13#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond13#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond13#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond13#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond13#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond13#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond13#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 
(wq_completion)bond13#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond13#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond13#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond13#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond13#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond13#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond13#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond13#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond13#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond13#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond13#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond13#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 rtnl_mutex &br->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex &br->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond65 irq_context: 0 (wq_completion)bond65 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond65 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#3 irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#3 
(work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 kn->active#5 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond17#3 irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
&dir->lock#2 irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &____s->seqcount irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond38#3 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond62 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond62 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond34#3 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond34#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond34#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond34#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond34#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond58 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond58 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond58 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 sb_writers#8 tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond65 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond65 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond65 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond66 irq_context: 0 (wq_completion)bond66 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond66 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond66 
(work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond39#3 irq_context: 0 (wq_completion)bond39#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond39#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond39#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq &(&conn->disc_work)->timer irq_context: softirq &(&conn->disc_work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&conn->disc_work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&conn->disc_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&conn->disc_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&conn->disc_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&conn->disc_work)->work) irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&conn->disc_work)->work) pool_lock#2 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&conn->disc_work)->work) &list->lock#9 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&conn->disc_work)->work) irq_context: 0 
(wq_completion)hci1#2 (work_completion)(&(&conn->disc_work)->work) &list->lock#9 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&conn->disc_work)->work) irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&conn->disc_work)->work) &list->lock#9 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&conn->disc_work)->work) irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&conn->disc_work)->work) &list->lock#9 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&conn->disc_work)->work) irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&conn->disc_work)->work) &list->lock#9 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&conn->disc_work)->work) irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&conn->disc_work)->work) &c->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&conn->disc_work)->work) &____s->seqcount#2 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&conn->disc_work)->work) &____s->seqcount irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&conn->disc_work)->work) pool_lock#2 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&conn->disc_work)->work) &list->lock#9 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock 
rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond67 irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond40#3 irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond34#4 irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 delayed_uprobe_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock 
irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &obj_hash[i].lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &group->mark_mutex lock &group->inotify_data.idr_lock &____s->seqcount#2 irq_context: 0 &group->mark_mutex lock &group->inotify_data.idr_lock &____s->seqcount irq_context: 0 (wq_completion)bond18#4 irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond68 irq_context: 0 (wq_completion)bond68 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond68 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond35#4 irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex uevent_sock_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond69 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &meta->lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond69 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_node_0 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 (wq_completion)bond70 irq_context: 0 (wq_completion)bond70 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond70 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex remove_cache_srcu rcu_node_0 irq_context: 0 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond68 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond68 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond71 irq_context: 0 (wq_completion)bond71 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond71 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 
(work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &meta->lock irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &root->kernfs_rwsem kfence_freelist_lock irq_context: 0 &p->lock &mm->mmap_lock &rq->__lock irq_context: 0 &p->lock &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->lock &of->mutex kn->active#5 fs_reclaim &rq->__lock irq_context: 0 &p->lock &of->mutex kn->active#5 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond37#4 irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &p->lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &p->lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &p->lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond73 irq_context: 
0 (wq_completion)bond73 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond73 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&p->wq) purge_vmap_area_lock quarantine_lock irq_context: 0 rtnl_mutex &rnp->exp_wq[1] irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 ptlock_ptr(page)#2/1 rcu_read_lock &p->pi_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 ptlock_ptr(page)#2/1 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 ptlock_ptr(page)#2/1 rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u32.lock crngs.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: softirq net/ipv4/devinet.c:474 irq_context: softirq net/ipv4/devinet.c:474 rcu_read_lock &pool->lock irq_context: softirq net/ipv4/devinet.c:474 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&net->ipv6.addr_chk_work)->timer rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq net/ipv4/devinet.c:474 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex &pool->lock 
&p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond74 irq_context: 0 (wq_completion)bond74 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond74 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#3 &rq->__lock irq_context: 0 (wq_completion)bond40#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond73 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond73 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim 
irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 cb_lock batched_entropy_u8.lock irq_context: 0 cb_lock kfence_freelist_lock irq_context: 0 kn->active#5 fs_reclaim &rq->__lock irq_context: 0 kn->active#5 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond38#4 irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#4 
(work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond75 irq_context: 0 (wq_completion)bond75 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond75 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#4 irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) rcu_node_0 irq_context: 0 rtnl_mutex &idev->mc_lock rcu_node_0 irq_context: 0 rtnl_mutex &idev->mc_lock &rcu_state.expedited_wq irq_context: 0 rtnl_mutex &idev->mc_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex &idev->mc_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond76 irq_context: 0 (wq_completion)bond76 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond76 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 
(wq_completion)bond76 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond21#4 irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond39#4 irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond77 irq_context: 0 (wq_completion)bond77 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond77 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2 
(work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) devices_rwsem &rq->__lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) devices_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond22#3 irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: softirq (&in_dev->mr_ifc_timer) fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond78 
irq_context: 0 (wq_completion)bond78 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond78 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock quarantine_lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 
(wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond70 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond70 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock init_mm.page_table_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock init_mm.page_table_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &pnn->pndevs.lock &rq->__lock irq_context: 0 rtnl_mutex &pnn->pndevs.lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond40#4 irq_context: 0 (wq_completion)bond40#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond40#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond40#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock 
&handshake->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &c->lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond79 irq_context: 0 (wq_completion)bond79 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond79 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond75 &rq->__lock irq_context: 0 (wq_completion)bond75 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond79 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond39#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem 
irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond77 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond77 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond74 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#7 &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#7 &base->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#7 &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond80 irq_context: 0 (wq_completion)bond80 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond80 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond80 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond74 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock 
irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond69 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond69 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond69 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond69 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond80 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond80 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond31#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond31#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond31#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond31#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond31#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond31#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond31#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond31#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond31#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond31#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond42#3 irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work quarantine_lock irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond81 irq_context: 0 (wq_completion)bond81 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond81 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &wq->mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fill_pool_map-wait-type-override batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fill_pool_map-wait-type-override kfence_freelist_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle rcu_node_0 irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond81 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond75 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond76 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond76 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond75 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond23#4 irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 
(wq_completion)bond23#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock 
&c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : 
"=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond43#3 irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &bgl->locks[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 &p->lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond24#3 irq_context: 0 rtnl_mutex bus_type_sem &rq->__lock irq_context: 0 rtnl_mutex bus_type_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 (wq_completion)events_unbound (reaper_work).work &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond71 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond71 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond75 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond75 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond68 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond77 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond77 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&slave->notify_work)->work) 
rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond66 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond66 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond37#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond60 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond60 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
fs_reclaim irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond80 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond74 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond74 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond79 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond79 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond79 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond79 
(work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond79 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond83 irq_context: 0 (wq_completion)bond83 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond83 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &c->lock irq_context: 0 (wq_completion)bond84 irq_context: 0 (wq_completion)bond84 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond84 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#3 irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&tcp_orphan_timer) fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&tcp_orphan_timer) 
fill_pool_map-wait-type-override &c->lock irq_context: softirq (&tcp_orphan_timer) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq (&tcp_orphan_timer) fill_pool_map-wait-type-override &____s->seqcount irq_context: softirq (&tcp_orphan_timer) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock &local->queue_stop_reason_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock hwsim_radio_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock &list->lock#19 irq_context: softirq rcu_read_lock &sta->rate_ctrl_lock irq_context: softirq rcu_read_lock &sta->rate_ctrl_lock &c->lock irq_context: softirq rcu_read_lock &sta->rate_ctrl_lock pool_lock#2 irq_context: softirq rcu_read_lock &sta->rate_ctrl_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &sta->rate_ctrl_lock krc.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sta->ampdu_mlme.mtx irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sta->ampdu_mlme.mtx &sta->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx (work_completion)(&sta->ampdu_mlme.work) irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &rnp->exp_lock irq_context: 0 (wq_completion)events 
(work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sta->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx krc.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->key_mtx irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &fq->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &dentry->d_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 pin_fs_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock irq_context: 0 (wq_completion)events 
(work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 mount_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &fsnotify_mark_srcu irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &xa->xa_lock#7 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &fsnotify_mark_srcu irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_lock_key#7 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &s->s_inode_list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &xa->xa_lock#7 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock mount_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx mount_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->active_txq_lock[i] irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx (work_completion)(&sta->drv_deliver_wk) irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx 
&wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &sig->cred_guard_mutex tomoyo_ss fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond25#2 irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond85 irq_context: 0 (wq_completion)bond85 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond85 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &wq->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond26#3 irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond26#3 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 &pcp->lock &zone->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock batched_entropy_u8.lock crngs.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &fsnotify_mark_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond27#3 irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &____s->seqcount#2 irq_context: 0 rtnl_mutex &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond86 irq_context: 0 (wq_completion)bond86 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond86 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond28#3 irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 
(wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond86 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 
(wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond18#4 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond87 irq_context: 0 (wq_completion)bond87 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond87 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq fs/notify/mark.c:89 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &br->hash_lock quarantine_lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond85 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond29#4 irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond46#2 irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock rcu_read_lock &____s->seqcount#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond84 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond87 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond87 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond83 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond83 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond88 irq_context: 0 (wq_completion)bond88 
(work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond88 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start 
irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond29#4 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond88 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond88 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_node_0 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex 
net_rwsem irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond78 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond78 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond39#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond39#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond39#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond69 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond29#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond76 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond81 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond81 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond86 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond86 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond35#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 
(wq_completion)bond35#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock 
&base->lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 
(work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + 
(((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 pgd_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 key irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 pcpu_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 percpu_counters_lock irq_context: 0 (wq_completion)bond89 irq_context: 0 (wq_completion)bond89 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond89 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#4 irq_context: 0 (wq_completion)bond30#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond30#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond30#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond30#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond30#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 rcu_node_0 irq_context: 0 (wq_completion)bond69#2 irq_context: 0 (wq_completion)bond69#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond69#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond69#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond69#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90 irq_context: 0 (wq_completion)bond90 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond90 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 
(wq_completion)bond90 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond89 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond89 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond90 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond90 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond30#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond30#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond30#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond30#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond30#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond34#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond88 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 
(wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond85 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond85 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond40#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond40#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond40#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond40#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond40#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond30#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond30#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond30#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond30#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond30#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond30#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond30#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond30#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond30#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond30#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond30#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond30#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond30#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond30#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond30#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond84 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond84 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond69#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond69#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond69#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 
(wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
&dir->lock#2 irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond91 irq_context: 0 (wq_completion)bond91 
(work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond31#4 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 rcu_node_0 irq_context: 0 (wq_completion)bond31#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond31#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond31#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond31#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond31#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond31#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond31#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond31#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond31#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond31#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond31#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond31#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond31#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond31#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond31#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond31#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond31#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond31#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond31#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond31#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond31#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond31#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond31#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond31#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond31#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond31#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond31#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond31#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond31#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond31#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond31#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond31#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond31#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond92 irq_context: 0 (wq_completion)bond92 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond92 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond70#2 irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond70#2 
(work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#9 &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond47#2 irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem kfence_freelist_lock irq_context: 0 (wq_completion)bond93 irq_context: 0 (wq_completion)bond93 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond93 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &meta->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond93 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond93 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&slave->notify_work)->work) 
&p->pi_lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond92 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond92 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond70#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond92 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock 
irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &group->mark_mutex remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) rcu_node_0 irq_context: 0 &group->mark_mutex remove_cache_srcu &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &group->mark_mutex &lock->wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond94 irq_context: 0 (wq_completion)bond94 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond94 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond94 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond94 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock 
irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[1] &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond95 irq_context: 0 (wq_completion)bond95 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond95 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond96 irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond96 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond96 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &mm->mmap_lock rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond96 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond95 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond95 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond96 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond96 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 
0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 &mm->mmap_lock rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &ht->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond97 irq_context: 0 (wq_completion)bond97 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond97 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) 
irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &kernfs_locks->open_file_mutex[count] fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond49#2 irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &nft_net->commit_mutex &rq->__lock irq_context: 0 rtnl_mutex &nft_net->commit_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: hardirq hrtimer_bases.lock fill_pool_map-wait-type-override init_task.mems_allowed_seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)bond98 irq_context: 0 (wq_completion)bond98 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond98 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &meta->lock irq_context: 0 
(wq_completion)events (linkwatch_work).work rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock batched_entropy_u8.lock irq_context: 0 rtnl_mutex &idev->mc_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond99 irq_context: 0 (wq_completion)bond99 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond99 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events_unbound connector_reaper_work fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond34#5 irq_context: 0 (wq_completion)bond34#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond34#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond34#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond34#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond34#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond34#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond34#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond34#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond34#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond34#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond34#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond34#5 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond34#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock 
irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond98 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond98 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond34#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond34#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond34#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond34#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond34#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond34#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond34#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond34#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond34#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond34#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond34#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond34#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond34#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex 
net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 
0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond50#2 irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_node_0 irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100 irq_context: 0 (wq_completion)bond100 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond100 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 
(wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond75#2 irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 purge_vmap_area_lock &meta->lock irq_context: 0 purge_vmap_area_lock kfence_freelist_lock irq_context: 0 &iint->mutex ima_extend_list_mutex rcu_read_lock rcu_node_0 irq_context: 0 &iint->mutex ima_extend_list_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond75#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond97 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond97 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond99 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond99 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond100 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond100 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond100 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond35#5 irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &c->lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override pool_lock irq_context: softirq rcu_read_lock hwsim_radio_lock batched_entropy_u8.lock irq_context: softirq rcu_read_lock hwsim_radio_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &meta->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx kfence_freelist_lock irq_context: 0 (wq_completion)bond102 irq_context: 0 (wq_completion)bond102 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond102 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond102 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond76#2 irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103 irq_context: 0 (wq_completion)bond103 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond103 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) 
&peer->endpoint_lock rcu_read_lock_bh &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&w->w) &obj_hash[i].lock pool_lock irq_context: softirq (&app->join_timer) fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&app->join_timer) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex freezer_mutex.wait_lock irq_context: 0 (wq_completion)bond104 irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 
&type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (gc_work).work pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock quarantine_lock irq_context: softirq &(&forw_packet_aggr->delayed_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: softirq (&peer->timer_send_keepalive) &____s->seqcount#2 irq_context: softirq (&peer->timer_send_keepalive) &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#8 
(work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond102 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond102 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 
(wq_completion)bond102 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond35#5 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond35#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond78#2 irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#5 irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&slave->notify_work)->work) 
rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond103 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond103 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond78#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond54#2 irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond105 irq_context: 0 (wq_completion)bond105 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond105 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond79#2 irq_context: 0 
(wq_completion)bond79#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 tomoyo_ss mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 tomoyo_ss mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond105 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond105 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 
(wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 
(wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 
(wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond37#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond54#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond55#2 irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &p->lock batched_entropy_u8.lock irq_context: 0 &p->lock 
kfence_freelist_lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond106 irq_context: 0 (wq_completion)bond106 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond106 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &n->list_lock irq_context: 0 remove_cache_srcu &rq->__lock &obj_hash[i].lock irq_context: 0 remove_cache_srcu &rq->__lock &base->lock irq_context: 0 remove_cache_srcu &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 (wq_completion)bond38#5 irq_context: 0 (wq_completion)bond38#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond38#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond38#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond38#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond38#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond38#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 cb_lock genl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond65#2 irq_context: 0 (wq_completion)bond65#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond65#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond65#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond65#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond65#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond65#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond65#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond65#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#2 irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond107 irq_context: 0 (wq_completion)bond107 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond107 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) &base->lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond81#2 irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex deferred_probe_mutex &rq->__lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] remove_cache_srcu &rq->__lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex tomoyo_ss fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond108 irq_context: 0 (wq_completion)bond108 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond108 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond57#2 irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond82 irq_context: 0 (wq_completion)bond82 &rq->__lock irq_context: 0 (wq_completion)bond82 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond82 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond82 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &meta->lock irq_context: 0 (wq_completion)bond38#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond38#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond38#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond38#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond38#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond38#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond38#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond38#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond38#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond38#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond38#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond38#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond38#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond38#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond38#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond38#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond38#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond38#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond38#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond38#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond38#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond38#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 
(wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond39#5 irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 key#24 irq_context: 0 (wq_completion)bond109 irq_context: 0 (wq_completion)bond109 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond109 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#9 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond109 
(work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond108 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond108 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock 
irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond58#2 irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock 
irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond82 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond82 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond39#5 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond65#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond65#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond65#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond65#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond65#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond65#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond65#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond65#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond65#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond65#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond65#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond65#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond65#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond65#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond65#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond65#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond65#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond65#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond65#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond65#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond65#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond65#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond65#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond65#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond65#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond107 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond109 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &obj_hash[i].lock irq_context: softirq drivers/char/random.c:251 rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) krc.lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) krc.lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) 
&p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond109 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond109 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond109 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock base_crng.lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond107 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond107 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond65#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond65#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond65#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond82 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond106 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond106 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond38#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond38#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond38#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start 
irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond57#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond59#2 irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond59#2 
(work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond83#2 irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 
(wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond83#2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &n->list_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond112 irq_context: 0 (wq_completion)bond112 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond112 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 batched_entropy_u8.lock irq_context: 0 &type->i_mutex_dir_key#3 kfence_freelist_lock irq_context: 0 (wq_completion)bond60#2 irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 
&rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond84#2 irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond59#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond59#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond84#2 &rq->__lock irq_context: 0 (wq_completion)bond84#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem pgd_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem stock_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem key irq_context: 0 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem pcpu_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem percpu_counters_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem pcpu_lock stock_lock irq_context: 0 (wq_completion)bond113 irq_context: 0 (wq_completion)bond113 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond113 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal 
rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond112 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond112 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond60#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond60#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond113 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 
(wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond113 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond113 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 sb_writers#8 kn->active#5 batched_entropy_u8.lock irq_context: 0 sb_writers#8 kn->active#5 kfence_freelist_lock irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond114 irq_context: 0 (wq_completion)bond114 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond114 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &xt[i].mutex quarantine_lock irq_context: 0 (wq_completion)bond42#4 irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) wq_pool_mutex &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond42#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond115 irq_context: 0 (wq_completion)bond115 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond115 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#4 irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 
(wq_completion)bond43#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#3 irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond115 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond43#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond114 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond114 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond115 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond115 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 &f->f_pos_lock sb_writers#3 &mm->mmap_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex pool_lock#2 irq_context: 0 (wq_completion)bond116 irq_context: 0 (wq_completion)bond116 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond116 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) 
&idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock &c->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond116 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond116 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_node_0 irq_context: 0 rcu_read_lock (console_sem).lock irq_context: 0 rcu_read_lock console_owner_lock irq_context: 0 rcu_read_lock console_owner 
irq_context: 0 rcu_read_lock console_lock console_srcu console_owner_lock irq_context: 0 rcu_read_lock console_lock console_srcu console_owner irq_context: 0 rcu_read_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 rcu_read_lock console_lock console_srcu console_owner console_owner_lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET pool_lock#2 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117 irq_context: 0 (wq_completion)bond117 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond117 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 
(wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond117 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond117 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock 
irq_context: 0 (wq_completion)bond117 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond87#2 irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond48#2 irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118 irq_context: 0 (wq_completion)bond118 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond118 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &n->list_lock &c->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#9 
(work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond49#3 irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex uevent_sock_mutex mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond64#2 irq_context: 0 (wq_completion)bond64#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 rtnl_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem &cfs_rq->removed.lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)bond64#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond64#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond64#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond64#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond64#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond64#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond64#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond119 irq_context: 0 (wq_completion)bond119 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond119 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 
(wq_completion)bond119 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 (wq_completion)bond50#3 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_node_0 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 
irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond64#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond64#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond64#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond64#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond64#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond64#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond64#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond64#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond64#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond64#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond64#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond64#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond64#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond48#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond118 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond118 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond119 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond65#3 irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120 irq_context: 0 (wq_completion)bond120 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond120 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock rename_lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond120 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond120 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond51#2 irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond50#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond88#2 irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: softirq (&pool->idle_timer) irq_context: softirq (&pool->idle_timer) &pool->lock/1 irq_context: softirq (&pool->idle_timer) &pool->lock/1 
&obj_hash[i].lock irq_context: softirq (&pool->idle_timer) &pool->lock/1 &base->lock irq_context: softirq (&pool->idle_timer) &pool->lock/1 &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#5 fs_reclaim &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond121 irq_context: 0 (wq_completion)bond121 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond121 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond50#3 &rq->__lock irq_context: 0 (wq_completion)bond50#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 (wq_completion)bond122 irq_context: 0 (wq_completion)bond122 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond122 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond122 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond122 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex.wait_lock irq_context: 0 (wq_completion)bond89#2 irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 
(wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123 irq_context: 0 (wq_completion)bond123 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond123 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)bond123 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal rcu_node_0 irq_context: 0 sb_writers#4 sb_internal rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 sb_internal rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 sb_internal rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 
(wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond123 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 
(wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond88#2 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond51#2 &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond124 irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&ifibss->timer) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 
(wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond65#3 &rq->__lock irq_context: 0 (wq_completion)bond125 irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &xt[i].mutex remove_cache_srcu &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->work_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond125 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->work_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond126 irq_context: 0 (wq_completion)bond126 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond126 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond64#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond64#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond64#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond64#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond64#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem 
&ei->i_es_lock quarantine_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &rnp->exp_wq[1] irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#5 remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)bond127 irq_context: 0 (wq_completion)bond127 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond127 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)bond128 irq_context: 0 (wq_completion)bond128 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond128 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond51#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 &tsk->futex_exit_mutex &cfs_rq->removed.lock irq_context: 0 &tsk->futex_exit_mutex &obj_hash[i].lock irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&pool->idle_timer) &pool->lock irq_context: softirq (&pool->idle_timer) &pool->lock &obj_hash[i].lock irq_context: softirq (&pool->idle_timer) &pool->lock &base->lock irq_context: softirq (&pool->idle_timer) &pool->lock 
&base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock stock_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)bond91#2 irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond121 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond121 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129 irq_context: 0 (wq_completion)bond129 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond129 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#9 
mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &group->mark_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &mm->mmap_lock &vma->vm_lock->lock rcu_node_0 irq_context: 0 &mm->mmap_lock &vma->vm_lock->lock &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock &vma->vm_lock->lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &vma->vm_lock->lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &vma->vm_lock->lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond130 irq_context: 0 (wq_completion)bond130 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond130 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh &meta->lock irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &rq->__lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock rcu_read_lock_bh kfence_freelist_lock irq_context: 0 (wq_completion)bond131 irq_context: 0 (wq_completion)bond131 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond131 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131 
(work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond94#2 irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 fs_reclaim &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &n->list_lock 
irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &n->list_lock &c->lock irq_context: 0 (wq_completion)bond132 irq_context: 0 (wq_completion)bond132 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond132 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &meta->lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond131 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond131 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond131 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond95#2 irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133 irq_context: 0 (wq_completion)bond133 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond133 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133 
(work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond132 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond132 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &vma->vm_lock->lock fs_reclaim &cfs_rq->removed.lock irq_context: 0 &vma->vm_lock->lock fs_reclaim &obj_hash[i].lock irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond130 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond130 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond126 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 
(wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond133 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond133 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond133 
(work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond134 irq_context: 0 (wq_completion)bond134 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond134 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) kfence_freelist_lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &meta->lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) kfence_freelist_lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond96#2 irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond96#2 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 sb_writers#8 &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond135 irq_context: 0 (wq_completion)bond135 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond135 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &obj_hash[i].lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond136 irq_context: 0 (wq_completion)bond136 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond136 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (t) irq_context: softirq (t) &obj_hash[i].lock irq_context: softirq (t) &base->lock irq_context: softirq (t) &base->lock &obj_hash[i].lock irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock &c->lock irq_context: softirq security/integrity/ima/ima_queue_keys.c:35 irq_context: softirq 
security/integrity/ima/ima_queue_keys.c:35 rcu_read_lock &pool->lock irq_context: softirq security/integrity/ima/ima_queue_keys.c:35 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq security/integrity/ima/ima_queue_keys.c:35 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq security/integrity/ima/ima_queue_keys.c:35 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq security/integrity/ima/ima_queue_keys.c:35 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (ima_keys_delayed_work).work irq_context: 0 (wq_completion)events (ima_keys_delayed_work).work ima_keys_lock irq_context: 0 (wq_completion)events (ima_keys_delayed_work).work &obj_hash[i].lock irq_context: 0 (wq_completion)events (ima_keys_delayed_work).work pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond135 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond135 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond135 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond127 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 
(wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &meta->lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 &p->lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 &p->lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &p->lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &p->lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond128 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond128 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond128 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond134 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond134 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock 
irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond136 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond136 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond126 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond126 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 
(wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond118 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&slave->notify_work)->work) 
rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond123 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond123 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond65#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->lock &of->mutex kn->active#5 pgd_lock irq_context: 0 &p->lock &of->mutex kn->active#5 key irq_context: 0 &p->lock &of->mutex kn->active#5 pcpu_lock irq_context: 0 &p->lock &of->mutex kn->active#5 percpu_counters_lock irq_context: 0 &p->lock &of->mutex kn->active#5 &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond98#2 irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond138 irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work pool_lock irq_context: 0 (wq_completion)bond139 irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond140 irq_context: 0 (wq_completion)bond140 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond140 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&slave->notify_work)->work) &base->lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex pcpu_alloc_mutex &rq->__lock irq_context: softirq &(&ssp->srcu_sup->work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond141 irq_context: 0 (wq_completion)bond141 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond141 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond101 irq_context: 0 (wq_completion)bond101 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond101 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142 irq_context: 0 (wq_completion)bond142 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond142 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond99#2 
(work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond140 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond140 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu &obj_hash[i].lock irq_context: 0 &iint->mutex ima_extend_list_mutex rcu_node_0 irq_context: 0 (wq_completion)bond143 irq_context: 0 (wq_completion)bond143 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond143 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &tsk->futex_exit_mutex pool_lock#2 irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond99#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock 
irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu &c->lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#3 
&sb->s_type->i_mutex_key#9 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond145 irq_context: 0 (wq_completion)bond145 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond145 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146 irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond146 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond146 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &fsnotify_mark_srcu rcu_read_lock rcu_node_0 irq_context: 0 &fsnotify_mark_srcu rcu_read_lock &rq->__lock irq_context: 0 &fsnotify_mark_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex uevent_sock_mutex pgd_lock irq_context: 0 rtnl_mutex uevent_sock_mutex key irq_context: 0 rtnl_mutex uevent_sock_mutex pcpu_lock irq_context: 0 rtnl_mutex uevent_sock_mutex percpu_counters_lock irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond147 irq_context: 0 (wq_completion)bond147 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond147 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)events (linkwatch_work).work &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (linkwatch_work).work fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (linkwatch_work).work &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond148 irq_context: 0 (wq_completion)bond148 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond148 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) rcu_node_0 irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock 
irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond148 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond148 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond149 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond101 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond103#2 irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150 irq_context: 0 (wq_completion)bond150 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond150 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 
(wq_completion)bond150 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) quarantine_lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond146 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond146 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond142 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond142 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond149 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond149 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond149 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond150 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond150 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 
(wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
&____s->seqcount#10 irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock 
irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock &c->lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) pool_lock#2 irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 
(wq_completion)bond98#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond151 irq_context: 0 (wq_completion)bond151 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond151 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#3 irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152 irq_context: 0 (wq_completion)bond152 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond152 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond152 
(work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 fill_pool_map-wait-type-override pgd_lock irq_context: 0 fill_pool_map-wait-type-override key irq_context: 0 fill_pool_map-wait-type-override pcpu_lock irq_context: 0 fill_pool_map-wait-type-override percpu_counters_lock irq_context: 0 cb_lock genl_mutex &meta->lock irq_context: 0 (wq_completion)bond110 irq_context: 0 (wq_completion)bond110 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond110 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 nf_sockopt_mutex &cfs_rq->removed.lock irq_context: 0 nf_sockopt_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)bond153 irq_context: 0 (wq_completion)bond153 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond153 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override pgd_lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override key irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override pcpu_lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override percpu_counters_lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond105#2 irq_context: 0 (wq_completion)bond105#2 
(work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154 irq_context: 0 (wq_completion)bond154 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond154 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#2 irq_context: 0 &sb->s_type->i_mutex_key#12 &rq->__lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 
irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond110 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond110 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &lock->wait_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &pool->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond145 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &lock->wait_lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &p->pi_lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond113#2 irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 ptlock_ptr(page)#2 ptlock_ptr(page)#2/1 rcu_read_lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond98#3 irq_context: 0 (wq_completion)bond98#3 &rq->__lock irq_context: 0 (wq_completion)bond98#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#2 irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override pool_lock irq_context: 
0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#3 irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond106#2 irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 (wq_completion)bond156 irq_context: 0 (wq_completion)bond156 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond156 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&slave->notify_work)->work) 
fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond115#2 irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#2 irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#2 irq_context: 0 (wq_completion)bond107#2 &rq->__lock irq_context: 0 (wq_completion)bond107#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex fs_reclaim &cfs_rq->removed.lock irq_context: 0 &xt[i].mutex fs_reclaim 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond157 irq_context: 0 (wq_completion)bond157 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond157 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)bond116#2 irq_context: 0 (wq_completion)bond116#2 &rq->__lock irq_context: 0 (wq_completion)bond116#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond156 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 
(wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond101#2 irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond108#2 irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) 
&base->lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond158 irq_context: 0 (wq_completion)bond158 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond158 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond158 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond158 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond106#2 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond98#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 
(wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond107#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond145 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond117#2 irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#2 irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond102#2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond154 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start 
irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond109#2 irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond109#2 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159 irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&in_dev->mr_ifc_timer) fill_pool_map-wait-type-override &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq (&in_dev->mr_ifc_timer) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond152 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond152 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 
(wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 
(wq_completion)bond94#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond128 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond118#2 irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 &sem->wait_lock irq_context: 0 &sig->cred_guard_mutex &sem->wait_lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock 
remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#3 irq_context: 0 (wq_completion)bond103#3 &rq->__lock irq_context: 0 (wq_completion)bond103#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_node_0 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)events (work_completion)(&w->w) nfc_devlist_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond160 irq_context: 0 (wq_completion)bond160 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond160 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &n->list_lock irq_context: softirq (&peer->timer_retransmit_handshake) &peer->endpoint_lock &obj_hash[i].lock irq_context: softirq (&peer->timer_retransmit_handshake) &peer->endpoint_lock pool_lock#2 irq_context: 0 (wq_completion)bond159 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond160 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond160 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&c->lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond119#2 irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond104#2 irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#2 irq_context: 0 (wq_completion)bond110#2 
(work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 rtnl_mutex uevent_sock_mutex stock_lock irq_context: 0 rtnl_mutex uevent_sock_mutex pcpu_lock stock_lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond127 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond127 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond103#2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock 
&base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock &c->lock irq_context: 0 (wq_completion)bond161 irq_context: 0 (wq_completion)bond161 &rq->__lock irq_context: 0 (wq_completion)bond161 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond161 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond161 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override rcu_read_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#5 &cfs_rq->removed.lock irq_context: 0 kn->active#5 &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond120#2 irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond105#3 irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 batched_entropy_u8.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 kfence_freelist_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &meta->lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond111 irq_context: 0 (wq_completion)bond111 &rq->__lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond111 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond111 
(work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq &(&idev->mc_dad_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond162 irq_context: 0 (wq_completion)bond162 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond162 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#2 irq_context: 0 (wq_completion)bond121#2 &rq->__lock irq_context: 0 (wq_completion)bond121#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond106#3 irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_long &rq->__lock irq_context: 0 (wq_completion)events_long &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond163 irq_context: 0 
(wq_completion)bond163 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond163 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#2 irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#3 irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond122#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 
(wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond162 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond162 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond161 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond161 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 
(wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond112#2 irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#21 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#21 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond123#2 irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164 irq_context: 0 (wq_completion)bond164 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond164 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond163 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond163 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond163 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond108#3 irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#2 irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#3 irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#3 irq_context: 0 
(wq_completion)bond113#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond165 irq_context: 0 (wq_completion)bond165 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond165 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 
0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond156 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond156 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond145 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond145 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond102#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&n->list_lock &c->lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock 
irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond105#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq 
irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond125#2 irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#3 irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond119#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond143 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond143 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 
(wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond166 irq_context: 0 (wq_completion)bond166 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond166 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond126#2 irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#2 
(work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond111#2 irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 
(wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond167 irq_context: 0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 
0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond125#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond127#2 irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond112#3 irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond168 irq_context: 0 (wq_completion)bond168 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond168 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond168 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &n->list_lock &c->lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond128#2 irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond113#4 irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond114#3 irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &rcu_state.expedited_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond165 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond129#2 irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond129#2 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond114#4 irq_context: 0 (wq_completion)bond114#4 &rq->__lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond115#3 irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond169 irq_context: 0 (wq_completion)bond169 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond169 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#2 irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#4 irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &info->lock key#9 irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 
(wq_completion)bond113#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond131#2 irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &meta->lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond170 irq_context: 0 (wq_completion)bond170 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond170 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116#3 irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond116#3 
(work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#12 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116#4 irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171 irq_context: 0 (wq_completion)bond171 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond171 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#3 irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 
(wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond153 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond153 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond116#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond116#2 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond171 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond171 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond133#2 irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond118#3 irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond118#3 
(work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond172 irq_context: 0 (wq_completion)bond172 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond172 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#2 irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond119#3 irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 
(wq_completion)bond128#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond135#2 irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond135#2 
(work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond173 irq_context: 0 (wq_completion)bond173 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond173 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond173 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond117#4 irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock 
&obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 
(wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond116#3 &rq->__lock irq_context: 0 (wq_completion)bond116#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond120#3 irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond136#2 irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#2 
(work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond147 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond147 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond98#3 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond174 irq_context: 0 (wq_completion)bond174 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond174 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#4 irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#3 irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#3 
(work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137 irq_context: 0 (wq_completion)bond137 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond137 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond175 irq_context: 0 (wq_completion)bond175 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond175 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond122#3 irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#3 
(work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &group->mark_mutex remove_cache_srcu pool_lock#2 irq_context: softirq &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond138#2 irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond176 irq_context: 0 (wq_completion)bond176 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond176 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond164 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond164 
(work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond123#3 irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond119#4 irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#2 irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond177 irq_context: 0 (wq_completion)bond177 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond177 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 
(wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#3 irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond140#2 irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond140#2 
(work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond125#3 irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 
(wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond178 irq_context: 0 (wq_completion)bond178 &rq->__lock irq_context: 0 (wq_completion)bond178 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond141#2 irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex nl_table_wait.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_node_0 irq_context: 0 (wq_completion)bond120#4 irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#3 irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 
(wq_completion)bond126#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond179 irq_context: 0 (wq_completion)bond179 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond179 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#2 irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) 
&base->lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond121#4 irq_context: 0 (wq_completion)bond121#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond121#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond121#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond121#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond121#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond121#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#3 irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond179 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond179 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 
(wq_completion)bond179 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 
(wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond137 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond137 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem 
&list->lock#2 irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond180 irq_context: 0 (wq_completion)bond180 &rq->__lock irq_context: 0 (wq_completion)bond180 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond180 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond180 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#2 irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 
0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] &pcp->lock &zone->lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond122#4 irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 
(wq_completion)bond120#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond124#2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond128#3 irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond181 irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock 
irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond123#3 &rq->__lock irq_context: 0 (wq_completion)bond144 irq_context: 0 (wq_completion)bond144 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond144 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#4 irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond123#4 
(work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#3 irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#2 irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond121#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond121#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond121#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond121#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 
(wq_completion)bond98#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond157 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond121#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond121#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond121#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond121#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond121#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond121#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond121#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond121#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond121#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond121#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond121#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond121#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 oom_adj_mutex rcu_node_0 irq_context: 0 (wq_completion)bond146#2 irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond124#4 irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 
(wq_completion)bond124#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex quarantine_lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
&____s->seqcount#10 irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond131#3 irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#2 irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond132#2 irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond169 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 
(wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond119#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond144 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond144 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 cgroup_threadgroup_rwsem &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond117#2 &rq->__lock irq_context: 0 (wq_completion)bond117#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond148#2 irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#3 irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 
(wq_completion)bond133#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#4 irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#2 irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) kfence_freelist_lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &meta->lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond134#3 irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond126#4 irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond154 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 batched_entropy_u8.lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 kfence_freelist_lock irq_context: 0 (wq_completion)bond150#2 irq_context: 0 (wq_completion)bond150#2 
(work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 
(wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 
(wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock 
irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond172 &rq->__lock irq_context: 0 (wq_completion)bond172 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#3 irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond168 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 
(wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond151 &rq->__lock irq_context: 0 (wq_completion)bond151 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond127#4 irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#2 irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond136#3 irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond151#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 
(wq_completion)bond132#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond122#4 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond166 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond166 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond118#3 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond172 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond172 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond170 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &mm->page_table_lock quarantine_lock irq_context: 0 (wq_completion)bond152#2 irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem iattr_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond137#2 irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock base_crng.lock irq_context: 0 (wq_completion)bond153#2 irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] batched_entropy_u8.lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] batched_entropy_u8.lock crngs.lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] kfence_freelist_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#4 irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond129#4 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 
(wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond151 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond138#3 irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond132#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond154#2 irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#3 irq_context: 0 (wq_completion)bond141#2 rcu_node_0 irq_context: 0 (wq_completion)bond141#2 &rcu_state.expedited_wq irq_context: 0 (wq_completion)bond141#2 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bond141#2 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond141#2 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond141#2 &rq->__lock irq_context: 0 (wq_completion)bond141#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond139#3 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond155 irq_context: 0 (wq_completion)bond155 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond155 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#3 &rq->__lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond153#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond153#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond111 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond111 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond140#3 irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond140#3 
(work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#2 irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#3 irq_context: 0 (wq_completion)bond141#3 &rq->__lock irq_context: 0 (wq_completion)bond141#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond134#2 &rq->__lock irq_context: 0 (wq_completion)bond134#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tomoyo_ss &obj_hash[i].lock pool_lock irq_context: 0 tomoyo_ss fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond157#2 irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond157#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond147#2 &rq->__lock irq_context: 0 (wq_completion)bond147#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond142#3 irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#4 irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 remove_cache_srcu &____s->seqcount irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) 
&base->lock irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond158#2 irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond135 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond176 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond176 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 
(wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond67#2 irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1 
(work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 
(wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond143#3 irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond159#2 irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond152#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 
irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle irq_context: 0 (wq_completion)bond144#2 irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 
(wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond132#3 irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#2 irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond145#3 irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond122#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond68#2 irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond137#2 
(work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond174 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond174 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond161#2 irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#4 irq_context: 0 (wq_completion)bond133#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond133#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond133#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond133#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond133#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 dup_mmap_sem pgd_lock irq_context: 0 dup_mmap_sem stock_lock irq_context: 0 dup_mmap_sem &obj_hash[i].lock irq_context: 0 dup_mmap_sem key irq_context: 0 dup_mmap_sem pcpu_lock irq_context: 0 dup_mmap_sem percpu_counters_lock irq_context: 0 dup_mmap_sem pcpu_lock stock_lock irq_context: 0 dup_mmap_sem &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond133#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond133#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond133#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond133#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond133#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond133#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond133#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond133#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond133#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond133#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond133#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond133#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond146#3 irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond115#3 &rq->__lock irq_context: 0 (wq_completion)bond115#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond162#2 irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond162#2 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond144#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond139#3 
(work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond109#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
&ul->lock#2 irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond134#4 irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond134#4 
(work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond159#2 &rq->__lock irq_context: 0 (wq_completion)bond159#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147#3 irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#2 irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond163#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond148#3 irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120 &rq->__lock irq_context: 0 (wq_completion)bond120 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock 
irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock 
&base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
nl_table_wait.lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond164#2 irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond69#3 irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#3 irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#2 &rq->__lock irq_context: 0 (wq_completion)bond127#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond164 &rq->__lock irq_context: 0 (wq_completion)bond164 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &cfs_rq->removed.lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond165#2 irq_context: 0 (wq_completion)bond165#2 &rq->__lock irq_context: 0 (wq_completion)bond165#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 
(wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond70#3 irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 
irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond139#2 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond135#4 irq_context: 0 (wq_completion)bond135#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond135#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond135#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond135#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond135#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond135#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond135#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond135#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond135#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond135#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond135#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond135#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond135#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond135#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond135#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond135#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond135#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond135#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond135#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond135#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond165#2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond150#3 irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond166#2 irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond166#2 
(work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond71#2 irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond136#4 irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#3 irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 
softirq &(&bat_priv->bla.work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: softirq &(&bat_priv->bla.work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq &(&bat_priv->bla.work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: softirq &(&bat_priv->bla.work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond167#2 irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond72 irq_context: 0 (wq_completion)bond72 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond72 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond72 
(work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152#3 irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#3 &rq->__lock irq_context: 0 (wq_completion)bond114#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond137#3 irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)events 
(work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex pool_lock#2 irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock 
&base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 
0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 
(wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond168#2 irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#2 
(work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond138#4 irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond153#3 irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 
(wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond123#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond169#2 irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond73#2 irq_context: 0 (wq_completion)bond73#2 &rq->__lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#3 irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#3 
(work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond170#2 irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond175 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond131#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 
(wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 
(wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 
0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond167#2 
(work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond155#2 irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171#2 irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex uevent_sock_mutex 
&rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 
(wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu 
irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond75#3 irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#4 irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond156#3 irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond156#3 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bat_events &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond172#2 irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond119 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond133#2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond119 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
&____s->seqcount#10 irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond141#3 
(work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond157#3 irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond140#4 irq_context: 0 (wq_completion)bond140#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond140#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond140#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond140#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond140#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &fsnotify_mark_srcu &rq->__lock irq_context: 0 sb_writers#5 &fsnotify_mark_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond173#2 irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 
&type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock &ret->b_state_lock irq_context: softirq &fq->mq_flush_lock bit_wait_table + i &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond140#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond140#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond140#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond140#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond140#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond140#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond140#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond140#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond140#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond140#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond140#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond140#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond140#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond152#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond148#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 
(wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond158#3 irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh quarantine_lock irq_context: 0 (wq_completion)bond76#3 irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 
(wq_completion)bond76#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond174#2 irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond174#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 
(wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond159#3 irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock init_task.mems_allowed_seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bond175#2 irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#3 irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond141#4 irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond141#4 
(work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#2 irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond132#3 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond173 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond173 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond164 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond172#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond158 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond158 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 cb_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 cb_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond161#3 irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock 
irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond140#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond142#4 irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond77#2 irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond177#2 irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#3 irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond162#3 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond178#2 irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond111#2 &rq->__lock irq_context: 0 (wq_completion)bond135#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond163#3 irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#4 irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&slave->notify_work)->work) &base->lock 
irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 cb_lock remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond179#2 irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#3 irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond180#2 irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond141#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 
(wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond165#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond165#3 irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond144#3 irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex remove_cache_srcu irq_context: 0 cb_lock genl_mutex remove_cache_srcu quarantine_lock irq_context: 0 cb_lock genl_mutex remove_cache_srcu &c->lock irq_context: 0 cb_lock genl_mutex remove_cache_srcu &n->list_lock irq_context: 0 cb_lock genl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#3 irq_context: 0 
(wq_completion)bond78#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#2 irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond76#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
nl_table_lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex remove_cache_srcu 
&rq->__lock irq_context: 0 cb_lock genl_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond149 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock 
irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond166#3 irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &fsnotify_mark_srcu &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq irq_context: 0 (wq_completion)bond167#2 &rq->__lock irq_context: 0 (wq_completion)bond167#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond148#2 &rq->__lock irq_context: 0 (wq_completion)bond148#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond182 irq_context: 0 (wq_completion)bond182 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond182 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond182 
(work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond167#3 irq_context: 0 (wq_completion)bond167#3 &rq->__lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 (wq_completion)bond180 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond180 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond180 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond143#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond180 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 
(wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond155#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 
(wq_completion)bond172#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) 
&p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 
(wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond154#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond127#3 
(work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond183 irq_context: 0 (wq_completion)bond183 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond183 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#3 irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#3 irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond184 irq_context: 0 (wq_completion)bond184 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond184 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
remove_cache_srcu irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond182 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond169#3 irq_context: 0 (wq_completion)bond169#3 &rq->__lock irq_context: 0 (wq_completion)bond169#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond145#4 irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&slave->notify_work)->work) 
rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond167#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 
(wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond185 irq_context: 0 (wq_completion)bond185 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond185 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock pgd_lock irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock key irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock pcpu_lock irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock percpu_counters_lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond81#3 irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&slave->notify_work)->work) 
fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond170#3 irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond186 irq_context: 0 (wq_completion)bond186 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond186 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond171#3 irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond163#2 
(work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond141#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond187 irq_context: 0 (wq_completion)bond187 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond187 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_state.barrier_mutex rcu_read_lock &stopper->lock irq_context: 0 rcu_state.barrier_mutex rcu_read_lock &stop_pi_lock irq_context: 0 rcu_state.barrier_mutex rcu_read_lock &stop_pi_lock &rq->__lock irq_context: 0 rcu_state.barrier_mutex rcu_read_lock &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond172#3 irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 
(wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond82#2 irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond188 irq_context: 0 (wq_completion)bond188 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond188 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond173#3 irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &stopper->lock irq_context: 0 rtnl_mutex rcu_read_lock &stop_pi_lock irq_context: 0 rtnl_mutex rcu_read_lock &stop_pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex 
net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond71#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond145#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond145#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 
(wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
&ul->lock#2 irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond184 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond184 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond75#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq 
(&peer->timer_retransmit_handshake) &obj_hash[i].lock irq_context: softirq (&peer->timer_retransmit_handshake) &list->lock#17 irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond83#3 irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &fsnotify_mark_srcu fs_reclaim &rq->__lock irq_context: 0 &fsnotify_mark_srcu fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 
(wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond81#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond147#4 irq_context: 0 (wq_completion)bond147#4 &rq->__lock irq_context: 0 (wq_completion)bond147#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond189 irq_context: 0 (wq_completion)bond189 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond189 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond107#2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond135#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond135#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond135#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond135#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond135#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond135#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond135#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond174#3 irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond189 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond189 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond189 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex 
net_rwsem irq_context: 0 (wq_completion)bond185 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond185 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 
irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond178 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond178 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond190 irq_context: 0 (wq_completion)bond190 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond190 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond125#4 &rq->__lock irq_context: 0 (wq_completion)bond125#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond175#3 irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 
remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84#3 irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock 
irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 
irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond83#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond83#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond176#3 irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond192 irq_context: 0 (wq_completion)bond192 &rq->__lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond192 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock 
irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond85#2 irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)bond148#4 irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#3 irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#3 &rq->__lock irq_context: 0 (wq_completion)bond152#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond193 irq_context: 0 (wq_completion)bond193 &rq->__lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond193 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond86#2 irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex stack_depot_init_mutex &rq->__lock irq_context: 0 rtnl_mutex stack_depot_init_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#4 irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 
(wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem pool_lock#2 irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 &kernfs_locks->open_file_mutex[count] &cfs_rq->removed.lock irq_context: 0 sb_writers#8 fs_reclaim &rq->__lock irq_context: 0 sb_writers#8 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond194 irq_context: 0 (wq_completion)bond194 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond194 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#3 &rq->__lock irq_context: 0 (wq_completion)bond175#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond174 &rq->__lock irq_context: 0 (wq_completion)bond174 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond179#3 irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168 
(work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond150#4 irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 
(wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond138#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond195 irq_context: 0 (wq_completion)bond195 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond195 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)bond180#3 irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond175#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 
(wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 
(wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond87#3 irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond191 irq_context: 0 (wq_completion)bond191 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond191 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond196 irq_context: 0 (wq_completion)bond196 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond196 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 
(wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond160#3 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond175 &rq->__lock irq_context: 0 (wq_completion)bond175 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond181#3 irq_context: 0 (wq_completion)bond181#3 &rq->__lock irq_context: 0 (wq_completion)bond181#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 
0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond127#4 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond192#2 irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond197 irq_context: 0 (wq_completion)bond197 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond197 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#3 &rq->__lock irq_context: 0 (wq_completion)bond140#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond111#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond182#2 irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 
(wq_completion)bond182#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#3 irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond77#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond77#2 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond151 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond151 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond182 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond182 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
&ndev->lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 
(wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond193#2 irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond198 irq_context: 0 (wq_completion)bond198 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond198 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond150#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond194 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond194 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start 
irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond148#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 
(wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 
(wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond176#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152#4 irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond152#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond178#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond78#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond192 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond183#2 irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#2 irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond169 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond194#2 irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond199 irq_context: 0 (wq_completion)bond199 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond199 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#3 &rq->__lock irq_context: 0 (wq_completion)bond184#2 irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#3 irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 
0 (wq_completion)bond91#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond199 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond199 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond199 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond199 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&slave->notify_work)->work) 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond195#2 irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET &c->lock irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond200 irq_context: 0 (wq_completion)bond200 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond200 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &pcp->lock &zone->lock irq_context: softirq &fq->mq_flush_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq &fq->mq_flush_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond185#2 irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock 
irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond155 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond155 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
&____s->seqcount#10 irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 
(wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 
jbd2_handle &ei->i_data_sem rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond196#2 irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&slave->notify_work)->work) 
&p->pi_lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond82#2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock 
irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond165 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond165 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond165 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond165 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 
(wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond201 irq_context: 0 (wq_completion)bond201 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond201 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 
(wq_completion)bond180#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond185#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex pool_lock#2 irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock 
&rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond171#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond168 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond168 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond168 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond134#4 
(work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond193 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond193 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond193 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond197#2 irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond201 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond201 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond119#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond168#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond196 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 
(wq_completion)bond136#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond173#3 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond183#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond140#2 
(work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond202 irq_context: 0 (wq_completion)bond202 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond202 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &p->lock &of->mutex kn->active#5 batched_entropy_u8.lock irq_context: 0 &p->lock &of->mutex kn->active#5 kfence_freelist_lock irq_context: 0 &p->lock &of->mutex kn->active#5 &meta->lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 
(wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond170#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond170#3 
(work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond186#2 irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond187#2 irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#2 &rq->__lock irq_context: 0 (wq_completion)bond160#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#21 (console_sem).lock irq_context: 0 &sb->s_type->i_mutex_key#21 console_owner_lock irq_context: 0 &sb->s_type->i_mutex_key#21 console_owner irq_context: 0 &sb->s_type->i_mutex_key#21 console_lock console_srcu console_owner_lock irq_context: 0 &sb->s_type->i_mutex_key#21 console_lock console_srcu console_owner irq_context: 0 &sb->s_type->i_mutex_key#21 console_lock console_srcu console_owner &port_lock_key irq_context: 0 &sb->s_type->i_mutex_key#21 console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)bond198#2 irq_context: 0 (wq_completion)bond198#2 &rq->__lock irq_context: 0 (wq_completion)bond198#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 &iint->mutex (console_sem).lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 &iint->mutex console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex console_lock console_srcu console_owner irq_context: 0 &iint->mutex console_lock 
console_srcu console_owner &port_lock_key irq_context: 0 &iint->mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)bond203 irq_context: 0 (wq_completion)bond203 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond203 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &iint->mutex console_owner_lock irq_context: 0 &iint->mutex console_owner irq_context: 0 (wq_completion)bond168 &rq->__lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond188#2 irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 
0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock 
irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond67#2 &rq->__lock irq_context: 0 (wq_completion)bond67#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond204 irq_context: 0 (wq_completion)bond204 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond204 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &u->iolock &base->lock irq_context: 0 &u->iolock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond180#2 &rq->__lock irq_context: 0 (wq_completion)bond180#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) 
&____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &____s->seqcount irq_context: 0 rcu_state.barrier_mutex &cfs_rq->removed.lock irq_context: 0 rcu_state.barrier_mutex &obj_hash[i].lock irq_context: 0 rcu_state.barrier_mutex pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex pgd_lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex stock_lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_state.barrier_mutex key irq_context: 0 rtnl_mutex rcu_state.barrier_mutex pcpu_lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex percpu_counters_lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex pcpu_lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#3 &rq->__lock irq_context: 0 (wq_completion)bond199#2 irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond189#2 irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond199#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond197 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond197 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock 
&base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond203 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond149#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond175 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond175 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond175 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
fs_reclaim irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond170 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond170 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond170 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond148#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 
(wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond126#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 
(wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond123#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond188 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond188 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock 
&bond->stats_lock/1 irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond181#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond187 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond187 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond125#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond180#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 
(wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond180#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond191 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond191 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond191 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond91#3 
(work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond190#2 irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond143#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond204 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond204 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond198 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond198 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond190 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 
(wq_completion)bond190 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond138#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond202 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond200#2 irq_context: 0 (wq_completion)bond200#2 &rq->__lock irq_context: 0 (wq_completion)bond200#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond205 irq_context: 0 (wq_completion)bond205 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond205 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#5 fs_reclaim &rq->__lock irq_context: 0 &type->i_mutex_dir_key#5 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) kfence_freelist_lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &meta->lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh kfence_freelist_lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond191#2 irq_context: 0 (wq_completion)bond191#2 
(work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond191#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond191#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond191#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond191#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond191#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond191#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond191#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond206 irq_context: 0 (wq_completion)bond206 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond206 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond197 &rq->__lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: 0 rtnl_mutex fs_reclaim &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond192#3 irq_context: 0 (wq_completion)bond192#3 &rq->__lock irq_context: 0 (wq_completion)bond192#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond207 irq_context: 0 
(wq_completion)bond207 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond207 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond202 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond193#3 irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh 
fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond208 irq_context: 0 (wq_completion)bond208 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond208 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&slave->notify_work)->work) &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) kfence_freelist_lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &meta->lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh kfence_freelist_lock irq_context: 0 (wq_completion)bond194#3 irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond209 irq_context: 0 (wq_completion)bond209 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond209 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond209 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond209 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond209 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond209 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond209 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond209 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond209 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond209 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#5 rcu_read_lock 
&rcu_state.gp_wq irq_context: 0 sb_writers#8 &of->mutex kn->active#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (reaper_work).work fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond195#3 irq_context: 0 (wq_completion)bond195#3 &rq->__lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond195#3 
(work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond121#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)bond210 irq_context: 0 (wq_completion)bond210 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond210 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond155#2 &rq->__lock irq_context: 0 (wq_completion)bond155#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond196#3 irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[0] &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&slave->notify_work)->work) 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond211 irq_context: 0 (wq_completion)bond211 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond211 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond197#3 irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock (console_sem).lock irq_context: 0 &mm->mmap_lock console_lock console_srcu console_owner_lock irq_context: 0 &mm->mmap_lock console_lock console_srcu console_owner irq_context: 0 &mm->mmap_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 &mm->mmap_lock console_lock console_srcu console_owner 
console_owner_lock irq_context: 0 (wq_completion)bond212 irq_context: 0 (wq_completion)bond212 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond212 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 kn->active#5 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 kn->active#5 rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu pool_lock#2 irq_context: 0 &xt[i].mutex purge_vmap_area_lock &meta->lock irq_context: 0 &xt[i].mutex purge_vmap_area_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond198#3 irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond213 irq_context: 0 (wq_completion)bond213 &rq->__lock irq_context: 0 (wq_completion)bond213 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond213 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond213 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond191#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond191#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock 
rcu_read_lock_bh &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock &c->lock irq_context: 0 sb_writers#3 oom_adj_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock console_owner_lock irq_context: 0 &mm->mmap_lock console_owner irq_context: softirq (&mp->timer) irq_context: softirq (&mp->timer) &br->multicast_lock irq_context: softirq (&mp->timer) &br->multicast_lock pool_lock#2 irq_context: softirq (&mp->timer) &br->multicast_lock &dir->lock#2 irq_context: softirq (&mp->timer) &br->multicast_lock deferred_lock irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock &pool->lock irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&mp->timer) &br->multicast_lock &c->lock irq_context: softirq (&mp->timer) &br->multicast_lock nl_table_lock irq_context: softirq (&mp->timer) &br->multicast_lock &obj_hash[i].lock irq_context: softirq (&mp->timer) &br->multicast_lock nl_table_wait.lock irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq (&p->timer) irq_context: softirq (&p->timer) &br->multicast_lock irq_context: softirq (&p->timer) &br->multicast_lock &c->lock irq_context: softirq (&p->timer) &br->multicast_lock &n->list_lock irq_context: softirq (&p->timer) &br->multicast_lock &n->list_lock &c->lock irq_context: softirq (&p->timer) &br->multicast_lock pool_lock#2 irq_context: softirq (&p->timer) &br->multicast_lock &dir->lock#2 irq_context: softirq (&p->timer) &br->multicast_lock deferred_lock irq_context: softirq (&p->timer) &br->multicast_lock nl_table_lock irq_context: softirq (&p->timer) &br->multicast_lock &obj_hash[i].lock irq_context: softirq (&p->timer) &br->multicast_lock nl_table_wait.lock irq_context: softirq (&p->timer) &br->multicast_lock &base->lock irq_context: softirq (&p->timer) &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: softirq (&p->timer) &br->multicast_lock rcu_read_lock &pool->lock irq_context: softirq (&p->timer) &br->multicast_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&p->timer) &br->multicast_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq (&p->timer) &br->multicast_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq (&mp->timer) &br->multicast_lock &____s->seqcount#2 irq_context: softirq (&mp->timer) &br->multicast_lock &____s->seqcount irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) 
&br->multicast_lock irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) (&mp->timer) irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) &base->lock irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) pool_lock#2 irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) krc.lock irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) (&p->rexmit_timer) irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) (&p->timer) irq_context: 0 (wq_completion)bond214 irq_context: 0 (wq_completion)bond214 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond214 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond199#3 irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&p->timer) &br->multicast_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&p->timer) &br->multicast_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&mp->timer) &br->multicast_lock &n->list_lock irq_context: softirq (&mp->timer) &br->multicast_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#11 
(work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond199#3 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond214 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond214 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond191#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond191#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond191#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond212 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond187#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond190#2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond213 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 
(wq_completion)bond213 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond215 irq_context: 0 (wq_completion)bond215 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond215 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 
0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond200#3 irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 kn->active#5 remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 kn->active#5 remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &fsnotify_mark_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 &fsnotify_mark_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &fsnotify_mark_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &fsnotify_mark_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 fs_reclaim rcu_node_0 irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond202 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond205 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond216 irq_context: 0 (wq_completion)bond216 &rq->__lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond216 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 (wq_completion)bond214 &rq->__lock irq_context: 0 (wq_completion)bond214 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond195 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &meta->lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh kfence_freelist_lock irq_context: 0 (wq_completion)bond201#2 irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond181#2 &rq->__lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond209 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond209 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond209 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 
(wq_completion)bond209 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond209 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond216 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond216 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond200#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex pgd_lock irq_context: 0 &xt[i].mutex key irq_context: 0 &xt[i].mutex pcpu_lock irq_context: 0 &xt[i].mutex percpu_counters_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond206 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond196#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond202#2 irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) fill_pool_map-wait-type-override pool_lock 
irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 
irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 
(wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 stock_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)bond218 irq_context: 0 (wq_completion)bond218 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond218 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond179 &rq->__lock irq_context: 0 (wq_completion)bond179 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond203#2 irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond219 irq_context: 0 (wq_completion)bond219 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond219 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond219 
(work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock pgd_lock irq_context: 0 &mm->mmap_lock key irq_context: 0 &mm->mmap_lock pcpu_lock irq_context: 0 &mm->mmap_lock percpu_counters_lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock 
irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond205 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond205 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex 
rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond192#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 (console_sem).lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 console_lock console_srcu console_owner_lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 console_lock console_srcu console_owner irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 console_lock console_srcu console_owner &port_lock_key irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)bond204#2 irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &obj_hash[i].lock pool_lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 console_owner_lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 console_owner irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex cpu_hotplug_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex cpu_hotplug_lock 
(work_completion)(flush) irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &rcu_state.expedited_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
&____s->seqcount#9 irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond220 irq_context: 0 
(wq_completion)bond220 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond220 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock quarantine_lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#4 &rq->__lock irq_context: 0 (wq_completion)bond141#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 &xt[i].mutex fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 (wq_completion)bond205#2 irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#21 &n->list_lock irq_context: 0 sb_writers#8 tomoyo_ss rcu_node_0 irq_context: 0 sb_writers#8 tomoyo_ss &rcu_state.expedited_wq irq_context: 0 sb_writers#8 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#8 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
&sb->s_type->i_mutex_key#21 &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond141 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond141 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond141 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond188#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond221 irq_context: 0 (wq_completion)bond221 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond221 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond206#2 irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond220 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond220 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 
(wq_completion)bond220 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond219 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond219 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond208 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond208 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond198#3 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond221 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond221 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond221 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond221 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond221 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond183 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond183 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond124#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond124#2 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond222 irq_context: 0 (wq_completion)bond222 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond222 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond191#2 &rq->__lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond222 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond207#2 irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] (console_sem).lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] console_lock console_srcu console_owner_lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] console_lock console_srcu console_owner irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] console_lock console_srcu console_owner &port_lock_key irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond82#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond124#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 
(wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim 
&rq->__lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond164#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond142#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond177#3 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond177#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond179#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond179#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond129 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond129 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond129 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond200#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond214 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &cfs_rq->removed.lock 
irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 
(wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond198#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond212 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond212 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond212 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond133#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond133#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond133#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond133#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond133#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 
irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond150#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 
(wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond220 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond223 irq_context: 0 (wq_completion)bond223 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond223 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond208#2 irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] console_owner_lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] console_owner irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh quarantine_lock irq_context: 0 (wq_completion)bond224 irq_context: 0 (wq_completion)bond224 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond224 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond223 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond223 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond223 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)bond209#2 irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond219 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem 
irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock 
irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond225 irq_context: 0 (wq_completion)bond225 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond225 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 
&type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &xt[i].mutex rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &xt[i].mutex rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond210#2 irq_context: 0 (wq_completion)bond210#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond210#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond210#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond210#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond210#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond210#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond210#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond210#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond210#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond210#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond210#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond210#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond210#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond210#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond210#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond210#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond210#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond210#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex pcpu_alloc_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex pcpu_alloc_mutex &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock base_crng.lock irq_context: 0 (wq_completion)bond70#3 &rq->__lock irq_context: 0 (wq_completion)bond70#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&slave->notify_work)->work) 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&peer->timer_persistent_keepalive) batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 rtnl_mutex cpu_hotplug_lock &x->wait#10 irq_context: 0 rtnl_mutex cpu_hotplug_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex pool_lock irq_context: 0 rtnl_mutex &ht->mutex rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex &ht->mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond226 irq_context: 0 (wq_completion)bond226 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond226 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond227 irq_context: 0 (wq_completion)bond227 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond227 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 
(wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 
(wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond193#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond152#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond211 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond211 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond139#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond173#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock 
irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond173#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond212#2 irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#3 (console_sem).lock irq_context: 0 &f->f_pos_lock sb_writers#3 console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond226 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond226 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond212#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond212#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#3 console_lock console_srcu console_owner irq_context: 0 &f->f_pos_lock sb_writers#3 console_lock console_srcu console_owner &port_lock_key irq_context: 0 &f->f_pos_lock sb_writers#3 console_lock console_srcu console_owner console_owner_lock irq_context: 0 &f->f_pos_lock sb_writers#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#3 console_owner_lock irq_context: 0 &f->f_pos_lock sb_writers#3 console_owner irq_context: 0 (wq_completion)bond228 irq_context: 0 (wq_completion)bond228 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond228 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond169 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond169 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond169 
(work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 
(wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq 
irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond213#2 irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond6#5 irq_context: 0 (wq_completion)bond6#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond6#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond6#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond6#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond6#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond6#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rcu_read_lock &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond6#5 
(work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond6#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond6#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond6#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_node_0 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &rcu_state.expedited_wq irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bond208#2 &rq->__lock irq_context: 0 (wq_completion)bond208#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond6#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond6#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond6#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond6#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond6#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond6#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond6#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond6#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond6#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond6#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond6#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond6#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond6#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond203 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond203 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 
(wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond229 irq_context: 0 (wq_completion)bond229 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond229 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond214#2 irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond7#3 irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#2 &rq->__lock irq_context: 0 (wq_completion)bond90#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock 
irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 lock pidmap_lock batched_entropy_u8.lock irq_context: 0 lock pidmap_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 (wq_completion)bond215#2 irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#2 &rq->__lock irq_context: 0 (wq_completion)bond154#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond231 irq_context: 0 (wq_completion)bond231 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond231 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rcu_node_0 irq_context: 0 (wq_completion)bond212#2 &rq->__lock irq_context: 0 (wq_completion)bond212#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond216#2 irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 
(wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond232 irq_context: 0 (wq_completion)bond232 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond232 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond197#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond217 irq_context: 0 (wq_completion)bond217 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond217 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond217 
(work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond233 irq_context: 0 (wq_completion)bond233 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond233 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &xt[i].mutex &mm->mmap_lock rcu_node_0 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &xt[i].mutex &mm->mmap_lock &rcu_state.expedited_wq irq_context: 0 &xt[i].mutex &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &xt[i].mutex &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &xt[i].mutex &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->lock &of->mutex kn->active#5 &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond218#2 irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 sb_writers#4 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond234 irq_context: 0 (wq_completion)bond234 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond234 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond90#2 
(work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond8#4 irq_context: 0 (wq_completion)bond8#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond8#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond8#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond8#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond8#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond8#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond8#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond8#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &sbi->s_orphan_lock &lock->wait_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &lock->wait_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &p->pi_lock irq_context: 0 (wq_completion)bond219#2 irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond229 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond164#2 &rq->__lock irq_context: 0 (wq_completion)bond164#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond235 irq_context: 0 (wq_completion)bond235 
(work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond235 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond204#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond220#2 irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond8#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond8#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond8#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond8#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond8#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond8#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond8#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond8#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond8#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond8#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond8#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond8#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond8#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond8#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond8#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond8#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond8#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond8#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond8#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond8#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond8#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond8#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond8#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond8#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond236 irq_context: 0 (wq_completion)bond236 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond236 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond236 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem 
&list->lock#2 irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond221#2 irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond166#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond166#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 sb_writers#4 sb_internal &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond237 irq_context: 0 (wq_completion)bond237 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond237 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) &base->lock 
irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)bond222#2 irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond207#2 
(work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond231 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond231 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond231 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond231 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond238 irq_context: 0 (wq_completion)bond238 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond238 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock 
irq_context: 0 nf_sockopt_mutex pool_lock#2 irq_context: 0 nf_sockopt_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond223#2 irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 key#25 irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond224#2 irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond10#3 irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond10#3 
(work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond170#3 &rq->__lock irq_context: 0 (wq_completion)bond170#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond240 irq_context: 0 (wq_completion)bond240 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond240 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond196#3 &rq->__lock irq_context: 0 (wq_completion)bond196#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond225#2 irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond225#2 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond209#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond90#2 &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond90#2 &obj_hash[i].lock irq_context: 0 (wq_completion)bond241 irq_context: 0 (wq_completion)bond241 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond241 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond226#2 irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond185#2 &rq->__lock irq_context: 0 (wq_completion)bond185#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond224 &rq->__lock irq_context: 0 (wq_completion)bond223 
(work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond242 irq_context: 0 (wq_completion)bond242 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond242 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond227#2 irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond243 irq_context: 0 (wq_completion)bond243 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond243 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock hwsim_radio_lock &zone->lock irq_context: 0 (wq_completion)bond228#2 irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 
(wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond241 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond241 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond234 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond217 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond217 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond217 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond218#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond244 irq_context: 0 (wq_completion)bond244 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond244 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond240 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond240 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond242 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond243 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond243 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond243 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond207 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond207 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond207 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond84#3 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond134#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond229#2 irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond195#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond245 irq_context: 0 (wq_completion)bond245 &rq->__lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond245 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond13#5 irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond229 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock 
irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond237 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond237 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond13#5 
(work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond232 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond230 irq_context: 0 (wq_completion)bond230 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond230 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock 
&bond->stats_lock/1 irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond246 irq_context: 0 (wq_completion)bond246 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond246 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 
(wq_completion)bond246 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &iint->mutex rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond231#2 irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond198 &rq->__lock irq_context: 0 (wq_completion)bond198 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 
(wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond238 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond238 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond245 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond247 irq_context: 0 (wq_completion)bond247 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond247 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond232#2 irq_context: 0 (wq_completion)bond232#2 &rq->__lock irq_context: 0 (wq_completion)bond232#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond232#2 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond246 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond246 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 
irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_node_0 irq_context: 0 (wq_completion)bond14#5 irq_context: 0 (wq_completion)bond14#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond14#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond14#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond14#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond14#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond14#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 dup_mmap_sem 
&mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond248 irq_context: 0 (wq_completion)bond248 &rq->__lock irq_context: 0 (wq_completion)bond248 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond248 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond248 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond248 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond248 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond248 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond248 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond248 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond248 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond230 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond230 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond233#2 irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond202 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond202 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond202 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond233#2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond249 irq_context: 0 (wq_completion)bond249 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond249 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond14#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond14#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond14#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond14#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond249 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond249 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
fs_reclaim irq_context: 0 (wq_completion)bond249 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 
(wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond234#2 irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)bond210#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond210#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond222 &rq->__lock irq_context: 0 (wq_completion)bond222 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond250 irq_context: 0 (wq_completion)bond250 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond250 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 
sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond15#4 irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond235#2 irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 delayed_uprobe_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 sb_internal jbd2_handle irq_context: 0 sb_writers#4 sb_internal &journal->j_state_lock irq_context: 0 sb_writers#4 sb_internal &journal->j_state_lock 
tk_core.seq.seqcount irq_context: 0 sb_writers#4 sb_internal &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 sb_internal &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond236#2 irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond252 irq_context: 0 (wq_completion)bond252 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond252 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond252 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond252 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond252 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond237#2 irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond237#2 
(work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond253 irq_context: 0 (wq_completion)bond253 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond253 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond238#2 irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#2 &rq->__lock irq_context: 0 (wq_completion)bond175#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond248 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond16#6 irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock &lock->wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &lock->wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &p->pi_lock irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond254 irq_context: 0 
(wq_completion)bond254 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond254 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond239 irq_context: 0 (wq_completion)bond239 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond239 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond227 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond17#4 irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond207 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond207 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond200 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond200 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond255 irq_context: 0 (wq_completion)bond255 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond255 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond255 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond255 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond255 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond255 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond255 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond255 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond255 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond255 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond255 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond255 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond255 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond17#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond210#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond210#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond210#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond210#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond210#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond196 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond196 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond158#2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond199#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex quarantine_lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 
0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond240#2 irq_context: 0 (wq_completion)bond240#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond240#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond240#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond240#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond240#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond240#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond240#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond240#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond240#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond240#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock 
&p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond245 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond245 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond240#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond240#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond240#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond240#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond240#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond174#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond227#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond227#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond199 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss mount_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss mount_lock rcu_read_lock rename_lock.seqcount irq_context: 0 (wq_completion)bond256 irq_context: 0 (wq_completion)bond256 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond256 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond240#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond240#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond209 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond241#2 irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond241#2 
(work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond236#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond254 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond257 irq_context: 0 (wq_completion)bond257 &rq->__lock irq_context: 0 (wq_completion)bond257 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond236#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond254 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond254 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond254 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond234 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond234 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond177 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond177 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond6#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond6#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond6#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond6#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond6#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex 
&tn->lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond224 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond224 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond242 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond242 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &meta->lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond195#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond195#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond192 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond242#2 irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 
(wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond15#4 
(work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond258 irq_context: 0 (wq_completion)bond258 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond258 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond231#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond238 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond250 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond250 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond250 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond250 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond250 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond250 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &sb->s_type->i_lock_key#22 bit_wait_table + i irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex batched_entropy_u8.lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex kfence_freelist_lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &meta->lock irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 key#25 irq_context: 0 (wq_completion)bond243#2 irq_context: 0 (wq_completion)bond243#2 &rq->__lock irq_context: 0 (wq_completion)bond243#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond259 irq_context: 0 (wq_completion)bond259 &rq->__lock irq_context: 0 (wq_completion)bond259 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond259 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond259 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond21#5 irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond244#2 irq_context: 0 (wq_completion)bond244#2 &rq->__lock irq_context: 0 (wq_completion)bond244#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond244#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond244#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond244#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond244#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond244#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond244#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond244#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond244#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond244#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond244#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond244#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond244#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond244#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond244#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond244#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond244#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond244#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond244#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond260 irq_context: 0 (wq_completion)bond260 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond260 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond260 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond260 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond260 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond260 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond260 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 
(wq_completion)bond260 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond245#2 irq_context: 0 (wq_completion)bond245#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond245#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond245#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond245#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond245#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond245#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond245#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond245#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)bond261 irq_context: 0 (wq_completion)bond261 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond261 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&pool->idle_timer) rcu_read_lock &pool->lock/1 irq_context: softirq (&pool->idle_timer) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq (&pool->idle_timer) rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: softirq (&pool->idle_timer) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &pool->lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &pool->lock/1 &base->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &pool->lock/1 &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex.wait_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) &p->pi_lock &rq->__lock irq_context: 0 
(wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) pool_lock#2 irq_context: 0 wq_pool_attach_mutex &cfs_rq->removed.lock irq_context: 0 wq_pool_attach_mutex &obj_hash[i].lock irq_context: 0 wq_pool_attach_mutex pool_lock#2 irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient &rq->__lock irq_context: 0 (wq_completion)events_power_efficient &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond233 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond226#3 irq_context: 0 (wq_completion)bond226#3 &rq->__lock irq_context: 0 (wq_completion)bond226#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond226#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond226#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond226#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond226#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond226#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond226#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond226#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond226#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond246#2 irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond262 irq_context: 0 (wq_completion)bond262 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond262 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond262 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond262 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond262 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond262 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond262 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond262 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)bond262 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond262 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond247#2 irq_context: 0 (wq_completion)bond247#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond247#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond247#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond247#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond220#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond247#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond247#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond247#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond247#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&slave->notify_work)->work) &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond263 irq_context: 0 (wq_completion)bond263 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond263 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond14#5 &rq->__lock irq_context: 0 (wq_completion)bond14#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond248#2 irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond248#2 
(work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond225 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond225 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond226#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond226#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond226#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond226#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond226#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond260 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond260 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond260 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond260 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond260 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond258 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond258 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 
(wq_completion)bond238#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond247#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond247#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond247#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond247#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond247#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond247#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond247#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond247#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond247#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond247#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond247#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond247#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond227#3 irq_context: 0 (wq_completion)bond227#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond227#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond227#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond227#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond227#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond227#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond227#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond227#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond261 &rq->__lock irq_context: 0 (wq_completion)bond261 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond22#4 irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond22#4 
(work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond8#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &xt[i].mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond215#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond263 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond263 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond263 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond232#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond232#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond174#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond174#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond264 irq_context: 0 (wq_completion)bond264 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond264 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond264 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond264 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond264 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond264 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond264 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond264 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond191#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond191#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond249#2 irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond228#3 irq_context: 0 
(wq_completion)bond228#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond265 irq_context: 0 (wq_completion)bond265 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond265 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond262 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond264 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond264 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#5 irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 (wq_completion)bond229#2 &rq->__lock irq_context: 0 (wq_completion)bond229#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond250#2 irq_context: 0 (wq_completion)bond250#2 &rq->__lock irq_context: 0 
(wq_completion)bond250#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond265 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond265 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond264 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond264 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond264 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond264 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond264 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond247 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond266 irq_context: 0 (wq_completion)bond266 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond266 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond266 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond266 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond266 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond266 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond266 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond266 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond192 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond192 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond251 irq_context: 0 (wq_completion)bond251 &rq->__lock irq_context: 0 (wq_completion)bond251 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond251 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond251 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond251 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond251 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond251 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond251 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond251 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond251 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond251 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond251 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond251 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond251 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond251 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond251 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond251 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond251 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond251 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond251 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond251 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond251 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock 
irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond245#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond245#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond245#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond245#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond245#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond253 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond253 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&slave->notify_work)->work) &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond261 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond248 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond248 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond248 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond248 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond248 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond24#4 irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 
0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond22#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond22#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond229#3 irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond267 irq_context: 0 (wq_completion)bond267 &rq->__lock irq_context: 0 (wq_completion)bond267 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond267 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond267 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond267 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond267 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond267 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond267 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond267 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond267 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond267 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond267 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock 
irq_context: 0 (wq_completion)bond267 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond213#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond138#4 
(work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond138#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond260 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond260 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond252#2 irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond25#3 irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond268 irq_context: 0 (wq_completion)bond268 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond268 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond268 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond268 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#5 (work_completion)(&(&slave->notify_work)->work) 
fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond268 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond268 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond268 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond268 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond228#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond253#2 irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond254#2 irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond262 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond262 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond262 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond262 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond262 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond262 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond262 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 
(wq_completion)bond262 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond262 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond262 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond262 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond262 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond262 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond262 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond262 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond262 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond262 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond262 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond262 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond262 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond262 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond262 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond262 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond262 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond262 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond262 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#6 
(work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond270 irq_context: 0 (wq_completion)bond270 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond270 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond230#2 irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#4 irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond255#2 irq_context: 0 (wq_completion)bond255#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond255#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond255#2 
(work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond255#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
&tbl->lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond255#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond255#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond255#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond255#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond243 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &meta->lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 remove_cache_srcu &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) lock pidmap_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond271 irq_context: 0 (wq_completion)bond271 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond271 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond256#2 irq_context: 0 (wq_completion)bond256#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond256#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond256#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond256#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond256#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond256#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond256#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond256#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond240#2 (work_completion)(&(&bond->mcast_work)->work) &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &ei->xattr_sem rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sig->cred_guard_mutex &ei->xattr_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &ei->xattr_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &ei->xattr_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond27#4 irq_context: 0 (wq_completion)bond27#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond27#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond27#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond27#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond27#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond206 &rq->__lock irq_context: 0 (wq_completion)bond272 irq_context: 0 (wq_completion)bond272 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond272 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond272 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond272 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond272 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond272 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond272 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond272 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond272 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond257#2 irq_context: 0 (wq_completion)bond257#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond257#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond257#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond257#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond257#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond257#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond257#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond257#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond257#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond257#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond231#3 irq_context: 0 
(wq_completion)bond231#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond231#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond231#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond231#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond231#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond231#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond231#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond231#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond231#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond231#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond272 &rq->__lock irq_context: 0 (wq_completion)bond272 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond273 irq_context: 0 (wq_completion)bond273 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond273 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond258#2 irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond258#2 
(work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#4 irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond232#3 irq_context: 0 (wq_completion)bond232#3 &rq->__lock irq_context: 0 (wq_completion)bond232#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond232#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond232#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond232#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond232#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond232#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond232#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond232#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond232#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond232#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond232#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond257#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond257#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond259#2 irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond259#2 
(work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond274 irq_context: 0 (wq_completion)bond274 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond274 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond274 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond274 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond274 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond274 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond274 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond274 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond274 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond274 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond253 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond253 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond274 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 fill_pool_map-wait-type-override &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond29#5 irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond224#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond224#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond233#3 irq_context: 0 (wq_completion)bond233#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond233#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond233#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond233#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond233#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond233#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond235#2 &rq->__lock irq_context: 0 (wq_completion)bond235#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond233#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond233#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond233#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond233#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock &vma->vm_lock->lock pool_lock#2 irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond260#2 irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond275 irq_context: 0 (wq_completion)bond275 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond275 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 &kernfs_locks->open_file_mutex[count] krc.lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &kernfs_locks->open_file_mutex[count] krc.lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond30#5 irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond27#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock 
&bond->stats_lock/1 irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond261#2 irq_context: 0 (wq_completion)bond261#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond261#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond261#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond261#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond261#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond261#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond261#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond261#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond276 irq_context: 0 (wq_completion)bond276 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond276 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&slave->notify_work)->work) &base->lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond210 &rq->__lock irq_context: 0 (wq_completion)bond210 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 rcu_node_0 irq_context: 0 (wq_completion)bond27#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond27#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond27#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond27#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond27#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond256 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond256 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &rcu_state.expedited_wq irq_context: 0 &pipe->mutex/1 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &pipe->mutex/1 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond262#2 irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond252 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 (wq_completion)bond277 irq_context: 0 (wq_completion)bond277 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond277 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond277 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond277 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond230#2 &rq->__lock irq_context: 0 (wq_completion)bond230#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond214#2 &rq->__lock irq_context: 0 (wq_completion)bond277 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond277 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond277 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond277 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond226#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond31#5 irq_context: 0 
(wq_completion)bond31#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond31#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond31#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond31#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond31#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond31#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond31#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond31#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#7 rcu_node_0 irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&slave->notify_work)->work) 
&rq->__lock irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond263#2 irq_context: 0 (wq_completion)bond263#2 &rq->__lock irq_context: 0 (wq_completion)bond263#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond263#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond263#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond263#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond263#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond263#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond263#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond263#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond263#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond259 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond259 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond259 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond274 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond274 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond274 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond274 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond274 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond274 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond274 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond274 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond274 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond274 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond274 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond274 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond274 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond274 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond274 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond274 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond274 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond274 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond274 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond274 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond274 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond274 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond274 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond274 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond271 
(work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond231#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond231#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond278 irq_context: 0 (wq_completion)bond278 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond278 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond278 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond278 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond272 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond272 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond272 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond272 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond272 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond278 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond278 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond278 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond278 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond272 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond272 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond272 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond272 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond272 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond272 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond272 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond272 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond272 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond272 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond272 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond272 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond272 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond272 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond272 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond272 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond272 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond272 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond272 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond272 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond272 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond272 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond272 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond272 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond272 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond272 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond272 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond230#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond273 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 
irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond273 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond32#4 irq_context: 0 (wq_completion)bond32#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond32#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond32#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond32#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond235 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond235 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond265 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond133#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 
(wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond101 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond101 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond226#2 (work_completion)(&(&slave->notify_work)->work) 
rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond32#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond32#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond32#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond32#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond265 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond264#2 irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond248 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond248 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond216 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 
(wq_completion)bond136#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &meta->lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock 
irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond72 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond72 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&tn->lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond279 irq_context: 0 (wq_completion)bond279 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond279 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond279 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond279 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond273 &rq->__lock irq_context: 0 (wq_completion)bond273 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond33#5 irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond265#2 irq_context: 0 (wq_completion)bond265#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond265#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond265#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond265#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond265#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond265#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond265#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond265#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond14#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond280 irq_context: 0 (wq_completion)bond280 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond280 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond280 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond280 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond280 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond280 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond280 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond280 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &pool->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &pool->lock &base->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &pool->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond266#2 irq_context: 0 (wq_completion)bond266#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond266#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond266#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond266#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond266#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond266#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond238#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond266#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond266#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond266#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond266#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond34#6 irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond254#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 
(wq_completion)bond34#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond34#6 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond34#6 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond261#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond261#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond246#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond281 irq_context: 0 (wq_completion)bond281 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond281 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond255 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond255 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond233#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond233#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond265#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond265#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond213 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 
(wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond267#2 irq_context: 0 (wq_completion)bond267#2 &rq->__lock irq_context: 0 (wq_completion)bond267#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond234#3 irq_context: 0 (wq_completion)bond234#3 &rq->__lock irq_context: 0 (wq_completion)bond234#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond226#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond227#3 &rq->__lock irq_context: 0 (wq_completion)bond268#2 irq_context: 0 (wq_completion)bond268#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond268#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond268#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond268#2 (work_completion)(&(&slave->notify_work)->work) 
&base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond272 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond268#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond268#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond268#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond268#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond282 irq_context: 0 (wq_completion)bond282 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond282 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond282 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond282 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond282 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond282 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond266 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond266 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond267 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond267 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond267 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond267 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond267 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond232#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 
(wq_completion)bond232#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond232#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond232#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond232#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
&ul->lock#2 irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond280 &rq->__lock irq_context: 0 (wq_completion)bond280 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond269 irq_context: 0 (wq_completion)bond269 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond269 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond269 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond269 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond269 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond269 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond269 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond269 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond269 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond269 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond250#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond263#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond263#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond283 irq_context: 0 (wq_completion)bond283 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond283 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond283 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond283 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond214#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 
(wq_completion)bond214#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond229#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond276 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond172#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond283 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond283 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond283 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond283 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 
(wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond172#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond35#6 irq_context: 0 (wq_completion)bond35#6 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond35#6 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond35#6 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond35#6 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond35#6 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond35#6 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond269 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond270#2 irq_context: 0 (wq_completion)bond270#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond270#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond270#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond270#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond270#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond270#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond270#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond270#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond270#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond270#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond32#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond32#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond32#4 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond32#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond32#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond268#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond268#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &x->wait#3 &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &x->wait#3 &p->pi_lock &rq->__lock &base->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &x->wait#3 &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond284 irq_context: 0 (wq_completion)bond284 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond284 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond284 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond284 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock 
irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond284 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock 
irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond36#4 irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond241#2 
(work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond271#2 irq_context: 0 (wq_completion)bond271#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond271#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond271#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond271#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond271#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond271#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond271#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond271#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &child->perf_event_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond235#3 irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond285 irq_context: 0 (wq_completion)bond285 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond285 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond285 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond285 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)bond285 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond285 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond285 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond285 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond271#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond271#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond271#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond271#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond271#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
nl_table_lock irq_context: 0 (wq_completion)bond271#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond271#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond271#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond271#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond271#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond271#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond271#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond271#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond212#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond236 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond279 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond279 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond266 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond266 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond272#2 irq_context: 0 (wq_completion)bond272#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond272#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond272#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond272#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond263#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond263#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond263#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond263#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond272#2 
(work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond272#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond272#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond272#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond272#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond272#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond236#3 irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond286 irq_context: 0 (wq_completion)bond286 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond286 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond286 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond286 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond286 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond286 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond286 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond286 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond286 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond209 &rq->__lock irq_context: 0 (wq_completion)bond209 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock 
irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond37#6 irq_context: 0 (wq_completion)bond37#6 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond37#6 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#6 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond37#6 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#6 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond37#6 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond37#6 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond37#6 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#6 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond37#6 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond259#2 
(work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond237#3 irq_context: 0 (wq_completion)bond237#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond237#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond237#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond237#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond140#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond140#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond140#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond140#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond230#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 
(wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond233#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 
(wq_completion)bond175#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond175#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond38#6 irq_context: 0 (wq_completion)bond38#6 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond38#6 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#6 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond38#6 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond283 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond283 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond283 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond283 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond283 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond283 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond283 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond283 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond283 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond283 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond283 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond283 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond283 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond283 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond283 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond283 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond283 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond283 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond283 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond283 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond283 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond283 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond283 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond283 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond283 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond283 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond283 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond38#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond38#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond38#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond38#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 
(wq_completion)bond38#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond38#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond38#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond38#6 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond38#6 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond38#6 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond38#6 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond38#6 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond38#6 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond38#6 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#6 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond38#6 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#6 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond38#6 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond274#2 irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &rq->__lock irq_context: 0 (wq_completion)bond27#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond285 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond257#2 &rq->__lock irq_context: 0 (wq_completion)bond268 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond30#5 &rq->__lock irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond276 &rq->__lock irq_context: 0 
(wq_completion)bond276 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond238#3 irq_context: 0 (wq_completion)bond238#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond238#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond238#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond238#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond238#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond238#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond238#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond238#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond288 irq_context: 0 (wq_completion)bond288 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond288 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond288 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond288 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#4 &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond227#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond227#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond278 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond278 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond256 &rq->__lock irq_context: 0 (wq_completion)bond256 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond39#6 irq_context: 0 (wq_completion)bond39#6 &rq->__lock irq_context: 0 (wq_completion)bond39#6 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond39#6 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 
(wq_completion)bond39#6 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond39#6 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#6 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond39#6 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond39#6 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond39#6 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#6 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond39#6 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond275#2 irq_context: 0 (wq_completion)bond275#2 &rq->__lock irq_context: 0 (wq_completion)bond275#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond275#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond275#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond275#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond275#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond275#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond275#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond275#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond275#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond275#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond275#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond275#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond289 irq_context: 0 (wq_completion)bond289 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond289 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond289 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond289 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond289 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond289 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond289 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond289 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond274 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 
(wq_completion)bond274 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond274 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond274 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond274 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond239#2 irq_context: 0 (wq_completion)bond239#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond239#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond239#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond239#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond275#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond289 &rq->__lock irq_context: 0 (wq_completion)bond289 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond40#5 irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond24#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#5 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock 
irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (stats_flush_dwork).work &rq->__lock irq_context: 0 (wq_completion)bond276#2 irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond240#3 irq_context: 0 (wq_completion)bond240#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond240#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond240#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond240#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond240#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond240#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond240#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond240#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond240#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond290 irq_context: 0 (wq_completion)bond290 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond290 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond290 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond290 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#6 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond290 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond290 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond290 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond290 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond265#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond265#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond144#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 
(wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond277#2 irq_context: 0 (wq_completion)bond277#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond277#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond277#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond277#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond277#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond277#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond277#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond277#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond241#3 irq_context: 0 (wq_completion)bond241#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond241#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond241#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond241#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond241#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond241#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond241#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond241#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond241#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond241#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond241#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond241#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond241#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond16#6 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond16#6 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond231#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond231#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond231#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond231#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond231#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond231#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond231#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond231#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond231#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond231#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond231#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond231#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond231#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond231#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond231#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond231#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond231#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond231#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond231#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond231#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond231#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond231#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond231#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond231#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond280 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond280 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond280 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond280 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond280 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&slave->notify_work)->work) 
rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond258#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 &disk->open_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond291 irq_context: 0 (wq_completion)bond291 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond291 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond270#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond270#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond270#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond270#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond270#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond242#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 
(wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 
irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond229#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond277#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond277#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond277#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond277#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond277#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond261#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond261#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond261#2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond261#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond261#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond252#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond252#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond256#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond256#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond256#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond256#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond286 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond256#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond286 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond286 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond286 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond286 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond286 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond286 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond286 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond286 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond286 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond286 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond286 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond286 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond286 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond286 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond286 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond286 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond286 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond286 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond286 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond286 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond286 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond286 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond286 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond286 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond286 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond286 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond286 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond286 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond286 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond286 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond286 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond286 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond286 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond286 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond286 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond286 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond286 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond286 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond286 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond286 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond136#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond136#4 
(work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond246#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond245#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond245#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond245#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond245#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond245#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond245#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond245#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond245#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond245#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond245#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond245#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond245#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond245#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond245#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond245#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond245#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond245#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond245#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond245#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond245#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond245#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond245#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond245#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond245#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond245#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond245#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond221#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond255#2 &rq->__lock irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond13#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond242#3 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) pool_lock#2 irq_context: 0 (wq_completion)bond242#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond242#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond242#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond242#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond242#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond242#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond242#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond242#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond292 irq_context: 0 (wq_completion)bond292 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond292 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond292 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond292 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond292 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond292 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond292 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 
(wq_completion)bond292 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond292 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond292 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond292 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond292 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond292 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond25#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond289 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond289 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond289 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond289 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond289 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond240#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond240#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond240#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond240#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond240#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond274#2 
(work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond266 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond266 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond266 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond266 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond266 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond282 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond282 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond282 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond282 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond282 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond264 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond264 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond237#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 
(wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond293 irq_context: 0 (wq_completion)bond293 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 
(wq_completion)bond293 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond293 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond293 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond293 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond293 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond293 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond293 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond279#2 irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) rcu_node_0 irq_context: 0 (wq_completion)bond279#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond279#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond279#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond279#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond279#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond279#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond279#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond279#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond291 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond266#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond266#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond266#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond266#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond266#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond266#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond266#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond266#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond266#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond266#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond266#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond266#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond266#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond266#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond266#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond266#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond266#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond266#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond266#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond266#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond266#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond266#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond266#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond266#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond266#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond279#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond279#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond279#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond279#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond279#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond279#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond279#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond279#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond279#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond279#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond279#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond279#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond279#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond279#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond279#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond279#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond279#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond279#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond279#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond279#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond279#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond279#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond279#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond279#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond279#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond279#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond279#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond279#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond249 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond294 irq_context: 0 (wq_completion)bond294 &rq->__lock irq_context: 0 (wq_completion)bond294 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond294 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond294 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond294 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond294 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond272#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond272#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond280#2 irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 sb_internal jbd2_handle &____s->seqcount irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond222 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond222 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond222 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond268#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond280 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond233 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond233 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond295 irq_context: 0 (wq_completion)bond295 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond295 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond295 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond295 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond244#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond244#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond295 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond295 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond295 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond295 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond295 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond295 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond295 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond295 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond295 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond295 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond295 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond295 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond295 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond295 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond295 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond295 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond295 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond295 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond295 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond295 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond295 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond295 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 (wq_completion)bond295 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond295 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond279 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond279 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond279 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond279 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond279 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond279 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond279 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond280 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond280 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond280 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond280 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond280 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond280 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond280 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond280 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond280 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond280 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond280 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond280 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond280 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond280 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond280 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond280 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond280 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond280 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond280 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond280 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond280 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond280 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond280 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond280 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond280 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond280 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond280 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond280 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond280 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock 
irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond236#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond281#2 irq_context: 0 (wq_completion)bond281#2 (work_completion)(&(&slave->notify_work)->work) irq_context: softirq 
&(&nsim_dev->trap_data->trap_report_dw)->timer rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond281#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond281#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond281#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond281#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond281#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond281#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond281#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond277 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond277 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond277 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond277 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond277 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond277 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond277 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond277 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond277 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond277 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond277 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond277 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond277 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond277 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond277 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond277 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond277 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond277 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond277 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond277 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond277 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond277 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond277 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond277 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond277 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond277 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond277 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond277 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 
(wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond238#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond238#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond238#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond238#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond238#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond238#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond238#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond238#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond238#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond296 irq_context: 0 (wq_completion)bond296 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond296 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond296 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond296 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond296 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond296 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond296 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond296 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond296 
(work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond296 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond296 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond282#2 irq_context: 0 (wq_completion)bond282#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond282#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond282#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond282#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond282#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond282#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond282#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond282#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond227#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond227#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond227#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond227#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond227#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond227#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond227#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond227#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond227#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond227#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond227#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond227#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond227#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond227#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond227#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond227#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond227#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond227#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond227#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond227#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond227#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond227#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond227#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond227#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond227#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond227#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond227#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond227#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond227#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond42#5 irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 
(wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond241#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond277 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond277 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond277 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond277 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond277 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond35#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond35#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond35#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond35#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond35#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond35#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond35#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond35#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond35#6 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond276 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex quarantine_lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond276 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond296 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond296 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond296 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond296 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond296 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond296 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond296 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond296 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond296 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond296 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond296 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond296 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond296 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond296 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond296 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond296 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond296 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond296 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond296 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond296 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond296 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond296 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond296 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond296 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond296 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond268 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond268 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond268 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond268 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond268 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond268 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond268 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond268 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond268 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond268 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond268 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond268 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond268 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond268 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond268 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond268 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond268 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond268 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond268 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond268 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond268 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond268 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond268 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond268 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond268 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond268 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond268 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond268 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond268 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond268 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond268 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond294 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond294 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond294 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond294 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond294 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond294 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock 
irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond294 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond249#2 &rq->__lock irq_context: 0 (wq_completion)bond249#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond218 &rq->__lock irq_context: 0 (wq_completion)bond218 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond297 irq_context: 0 (wq_completion)bond297 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond297 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond297 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond297 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond297 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond297 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond297 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond297 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond297 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond297 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond37#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond37#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond37#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond37#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond37#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond37#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond37#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 
irq_context: 0 (wq_completion)bond37#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond37#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond37#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond37#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond37#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond37#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond37#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond37#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond37#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond37#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond37#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond37#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond37#6 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond257#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond257#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond257#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond257#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond257#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond31#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond31#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond31#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond31#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond31#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond31#5 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond31#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond31#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond31#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond31#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond31#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond31#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond31#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond31#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond31#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond31#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond31#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond31#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond31#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond31#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond31#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond31#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond31#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond31#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond31#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond31#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond31#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond31#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond31#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond31#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond31#5 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond31#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond31#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond31#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond31#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond283#2 irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&slave->notify_work)->work) 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond291 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond291 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond43#5 irq_context: 0 (wq_completion)bond43#5 &rq->__lock irq_context: 0 (wq_completion)bond43#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond43#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond43#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond43#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond43#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond43#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond270#2 &rq->__lock irq_context: 0 (wq_completion)bond298 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)bond298 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond298 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond298 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond298 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond298 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond298 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond298 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond298 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond298 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond298 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond290 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond290 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond117#4 &rq->__lock irq_context: 0 (wq_completion)bond117#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond280#2 
(work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond284#2 irq_context: 0 (wq_completion)bond284#2 &rq->__lock irq_context: 0 (wq_completion)bond284#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond284#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond284#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond284#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond284#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond284#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond284#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond284#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond284#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond284#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond284#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond288 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond288 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond247 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond206 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond206 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond206 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond44#4 irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond244#2 
(work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond299 irq_context: 0 (wq_completion)bond299 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond299 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond299 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond299 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond299 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond299 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond299 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond299 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond299 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond299 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond223#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond242#2 &rq->__lock irq_context: 0 (wq_completion)bond242#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond244 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond285#2 irq_context: 0 (wq_completion)bond285#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond285#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond285#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond285#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond285#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond285#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond285#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond285#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond285#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond285#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond285#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond285#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond285#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond285#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond285#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond285#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond285#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond285#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond292 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond247#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond289 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond43#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond205#2 &rq->__lock irq_context: 0 (wq_completion)bond300 irq_context: 0 (wq_completion)bond300 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond300 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond300 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond300 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond263#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond263#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond300 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond300 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond300 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond300 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond286#2 irq_context: 0 (wq_completion)bond286#2 &rq->__lock irq_context: 0 (wq_completion)bond286#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond286#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond286#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond286#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond286#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond286#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond286#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond286#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond286#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond286#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_freezable (work_completion)(&vb->update_balloon_stats_work) cpu_hotplug_lock &rq->__lock irq_context: 0 
(wq_completion)events_freezable (work_completion)(&vb->update_balloon_stats_work) cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond301 irq_context: 0 (wq_completion)bond301 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond301 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond301 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond301 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond272#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond272#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond272#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond272#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond272#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond270 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond270 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond284#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond284#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond284#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond284#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock 
irq_context: 0 (wq_completion)bond284#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond39#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond39#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond39#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond39#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond39#6 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond290 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond290 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond290 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond290 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond290 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond290 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond290 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond290 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond290 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond290 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond290 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond290 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond290 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond290 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond290 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond290 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond290 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond290 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond290 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond290 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond290 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond290 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond290 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond290 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond290 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond290 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond290 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond290 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond290 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond290 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond290 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond290 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond290 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond290 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond290 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond290 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond290 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond290 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond290 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond290 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond290 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond265#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond265#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond265#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond265#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond265#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond293 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond293 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond293 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond293 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond293 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond293 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond293 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond293 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond293 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond293 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond293 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond293 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond293 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond293 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond293 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond293 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond293 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond293 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond293 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond293 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond293 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond293 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond293 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond293 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond293 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond264#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond264#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond264#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond300 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond300 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond300 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond300 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond300 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond300 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond300 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond300 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond300 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond300 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond300 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond300 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond300 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond300 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond300 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond300 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond300 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond300 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond300 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond300 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond300 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond300 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond300 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond300 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond283#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond283#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond21#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond287 irq_context: 0 (wq_completion)bond287 
(work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond287 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond281#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond281#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond281#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond281#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond281#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond233#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond233#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond233#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond233#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond233#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 
(wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &____s->seqcount irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock 
irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &lock->wait_lock irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &p->pi_lock irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond301 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&peer->timer_retransmit_handshake) &list->lock#17 &obj_hash[i].lock irq_context: softirq (&peer->timer_retransmit_handshake) &list->lock#17 &____s->seqcount irq_context: softirq (&peer->timer_retransmit_handshake) &list->lock#17 pool_lock#2 irq_context: softirq (&peer->timer_retransmit_handshake) &list->lock#17 rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 
(wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond218#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond302 irq_context: 0 (wq_completion)bond302 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond302 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond302 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond302 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond302 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond302 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond302 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond302 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond302 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond302 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond302 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond302 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond302 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond302 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond302 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond302 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond302 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond302 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond302 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond302 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond302 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond302 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond302 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond302 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond255#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond255#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond255#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond255#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond255#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond242#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond242#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond242#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond242#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond242#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond33#5 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond33#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond278 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond278 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond278 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond278 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond278 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond278 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond278 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond278 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond278 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond278 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond278 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&tn->lock irq_context: 0 (wq_completion)bond278 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond278 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond278 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond278 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond278 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond278 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond278 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond278 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond278 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond278 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond278 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond278 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond278 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond278 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond278 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond288 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond288 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond293 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond293 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond293 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond293 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond293 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond232#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond232#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond232#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond232#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond232#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond232#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond232#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond232#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond232#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond232#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond232#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond232#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond232#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond232#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond232#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond232#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond232#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond232#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond232#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond232#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond232#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond232#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond232#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond232#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond232#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond232#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond115#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &c->lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock pool_lock#2 irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock nl_table_lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock nl_table_wait.lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock lock#8 irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock id_table_lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock 
&dir->lock#2 irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock krc.lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->list_lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock base_crng.lock irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 &root->kernfs_rwsem &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond267 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond288#2 irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&slave->notify_work)->work) &base->lock 
irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond228 &rq->__lock irq_context: 0 (wq_completion)bond228 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond282 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond282 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond302 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond302 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond289#2 irq_context: 0 (wq_completion)bond289#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond289#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond289#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond289#2 
(work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond240#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond289#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond240#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond289#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond289#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond289#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond153#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond303 irq_context: 0 (wq_completion)bond303 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond303 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond303 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond303 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond303 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond303 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond303 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond303 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond282#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond297 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond297 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond282#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 
(work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex remove_cache_srcu &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond290#2 irq_context: 0 (wq_completion)bond290#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond290#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond290#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond290#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond290#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond290#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond290#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond290#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond43#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond287 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond304 irq_context: 0 (wq_completion)bond304 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond304 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond304 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond304 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond304 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond304 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond304 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond304 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond304 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock 
irq_context: 0 (wq_completion)bond304 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle bit_wait_table + i irq_context: 0 (wq_completion)bond291#2 irq_context: 0 (wq_completion)bond291#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond291#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond268#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond268#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond268#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond268#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond268#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond291#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond291#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond291#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond291#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond291#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond291#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond291#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond291#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond291#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond215 &rq->__lock irq_context: 0 (wq_completion)bond215 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond301 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond301 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond301 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond301 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond301 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex 
rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond27#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond27#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond27#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond27#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond299 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond299 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond299 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond299 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond299 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond291#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond291#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond291#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond291#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond291#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond291#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond291#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond291#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond291#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond291#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond291#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond291#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond291#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond291#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond269 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond269 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond269 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond269 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond269 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond303 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond303 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond303 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond303 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond303 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond297 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond297 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond297 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond297 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond297 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond285 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond285 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond285 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond285 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond285 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond285 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond285 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond285 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond285 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond285 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond285 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond285 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond285 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond285 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond285 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond285 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond285 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond285 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond285 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond285 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond285 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond285 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond285 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond285 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond285 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond285 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond285 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond285 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond285 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond285 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond285 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond285 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond285 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond285 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond285 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond285 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond285 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond285 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond285 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond286#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond286#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond286#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond286#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond286#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond298 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond298 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond298 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond298 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond298 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond298 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond298 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond298 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond298 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond298 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond298 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond298 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond298 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond298 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond298 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond298 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond298 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond298 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond298 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond298 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond298 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond298 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond298 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond298 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond298 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond298 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond298 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock 
irq_context: 0 (wq_completion)bond298 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond276#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond289#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond289#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond289#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond289#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond289#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond289#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond289#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond289#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond289#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond289#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond289#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond289#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond289#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond289#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond289#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond289#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond289#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond289#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond289#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond289#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond289#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond289#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond289#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond298 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond298 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond298 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond298 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond298 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond286#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond286#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond286#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond286#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond286#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond286#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond286#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond286#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond286#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond286#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond286#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond286#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond286#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond286#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond286#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond286#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond286#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond286#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond286#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond286#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond286#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond286#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond286#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond286#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond286#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond286#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond37#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond37#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond37#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond37#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond37#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond37#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond37#6 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond289#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond289#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond289#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond289#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond289#2 
(work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond297 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond297 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond297 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond297 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond297 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond297 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond297 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond297 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond297 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond297 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond297 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond297 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond297 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond297 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond297 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond297 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond297 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond297 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond297 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond297 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond297 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond297 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond297 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond297 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond297 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond297 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond297 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond303 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond303 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond303 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond303 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond303 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond303 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond303 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond303 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond303 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond303 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond303 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond303 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond303 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond303 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond303 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond303 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond303 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond303 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond303 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond303 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond303 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond303 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond303 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond303 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond303 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond303 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond303 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond303 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond269 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond269 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond269 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond269 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond269 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond269 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond269 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond269 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond269 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond269 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond269 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond269 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond269 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond269 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond269 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond269 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond269 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond269 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond269 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond269 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond269 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond269 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond269 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond269 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond269 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond291#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond291#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond291#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond291#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond299 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond299 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond299 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond299 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond299 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond299 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond299 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond299 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond299 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond299 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond299 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond299 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond299 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond299 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond299 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond299 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond299 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond299 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 
(wq_completion)bond299 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond299 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond299 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond299 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond299 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond299 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond299 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond303 (work_completion)(&(&bond->mcast_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond303 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond303 (work_completion)(&(&bond->mcast_work)->work) pool_lock#2 irq_context: 0 (wq_completion)bond303 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond239#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond239#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond239#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond239#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond239#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond284#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond284#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond284#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond284#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond284#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond284#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond284#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond284#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond284#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond284#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond284#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond284#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond284#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond284#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond284#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond284#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond284#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond284#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond284#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond284#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond284#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond284#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond284#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond284#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond284#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond284#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond282#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond282#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond282#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond282#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond282#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond43#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond43#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond43#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond43#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond43#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond290#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond290#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond290#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond290#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond290#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond263#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond263#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond263#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond263#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond263#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 
(wq_completion)bond280#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond287 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond306 irq_context: 0 (wq_completion)bond306 &rq->__lock irq_context: 0 (wq_completion)bond306 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond306 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond306 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 
(wq_completion)bond306 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond306 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond292#2 irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137 &rq->__lock irq_context: 0 (wq_completion)bond137 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock &meta->lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond307 irq_context: 0 (wq_completion)bond307 &rq->__lock irq_context: 0 (wq_completion)bond307 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond307 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond307 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond307 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond307 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond307 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond307 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond307 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond307 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond293#2 irq_context: 0 (wq_completion)bond293#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond293#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond293#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond293#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&slave->notify_work)->work) 
rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond244 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond244 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond243#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond293#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond293#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond293#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond293#2 
(work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond307 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond307 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond307 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond307 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond307 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond275 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond275 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond275 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond275 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
&____s->seqcount#9 irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond275 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond304 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 &iint->mutex mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond304 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond304 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond304 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond304 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond304 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond304 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond304 
(work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 
(wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond288#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond302 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond302 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond302 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond302 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond258 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond302 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond258 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond275#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond275#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond275#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond275#2 (work_completion)(&(&slave->notify_work)->work) 
rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond275#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond219#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex proc_inum_ida.xa_lock &c->lock irq_context: 0 rtnl_mutex proc_inum_ida.xa_lock &n->list_lock irq_context: 0 rtnl_mutex proc_inum_ida.xa_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond308 irq_context: 0 (wq_completion)bond308 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond308 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond308 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond308 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond308 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond308 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond308 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond308 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond294#2 irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond292 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond242#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond30#5 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 
(wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond30#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond309 irq_context: 0 (wq_completion)bond309 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond309 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond309 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond309 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond309 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond309 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond309 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond309 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond309 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond309 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond309 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond309 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond310 irq_context: 0 (wq_completion)bond310 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond310 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond310 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond310 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond310 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 
(wq_completion)bond310 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond310 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond310 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond310 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond310 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond310 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond310 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond310 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond308 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond308 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond308 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond308 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond308 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond308 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond308 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond308 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond308 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond308 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond308 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond308 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond308 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond308 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond308 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond308 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond308 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond308 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond308 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond308 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond308 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond308 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond308 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond308 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond308 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond308 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond308 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond308 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond308 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond308 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond308 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond308 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond308 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond308 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond308 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond308 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond308 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond308 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond308 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond308 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock 
irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond295 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 
(wq_completion)bond295 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &lock->wait_lock irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &p->pi_lock irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond220#2 &rq->__lock irq_context: 0 (wq_completion)bond220#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond296#2 irq_context: 0 
(wq_completion)bond309 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond309 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond309 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond309 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond309 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond293#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond293#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond293#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond293#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond293#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond293#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond293#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond293#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond293#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond293#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond293#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond293#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond293#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond293#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond293#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond293#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond293#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond293#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond293#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond293#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond293#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond293#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond293#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond293#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond293#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond293#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond293#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond293#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond293#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond293#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond293#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond293#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond293#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond311 irq_context: 0 (wq_completion)bond311 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond311 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond311 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond311 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)bond311 
(work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond311 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond311 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond311 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond235#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&slave->notify_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&slave->notify_work)->work) pool_lock#2 irq_context: 0 (wq_completion)bond297#2 irq_context: 0 (wq_completion)bond297#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond297#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond297#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond297#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond225#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond225#2 
(work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond297#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond297#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond297#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond297#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond312 irq_context: 0 (wq_completion)bond312 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond312 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond312 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond312 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond238#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond238#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond298#2 irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond313 irq_context: 0 (wq_completion)bond313 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond313 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond313 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond313 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg2#9 
(work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &pcp->lock &zone->lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond313 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond313 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond313 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond313 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond147#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond313 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond313 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond265#2 &rq->__lock irq_context: 0 (wq_completion)bond265#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond271#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond271#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond299#2 irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 
(wq_completion)bond299#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)bond247#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond314 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) rcu_node_0 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond314 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond314 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond314 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond314 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &iint->mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &iint->mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond314 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond314 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond314 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond314 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 (wq_completion)bond300#2 irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond300#2 
(work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &mm->mmap_lock rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &mm->mmap_lock rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 &mm->mmap_lock rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &mm->mmap_lock rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &mm->mmap_lock rcu_read_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &mm->mmap_lock rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)bond315 irq_context: 0 (wq_completion)bond315 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond315 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond315 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond315 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond315 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond315 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond315 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond315 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond315 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond315 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond315 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond315 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond315 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond315 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond315 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock 
irq_context: 0 (wq_completion)bond315 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond315 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond315 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond315 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond315 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond315 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond297#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond297#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond297#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond297#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond297#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond297#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond297#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond301#2 irq_context: 0 (wq_completion)bond301#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond301#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond301#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond301#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond301#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond301#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond301#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond301#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond312 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond312 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond313 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond313 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond313 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond313 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond313 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond313 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond313 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond313 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond313 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond313 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond313 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond313 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond313 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond313 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond313 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond313 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond313 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond313 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond313 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond313 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond313 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond313 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond313 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond313 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond313 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond313 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond313 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond313 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond313 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond313 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) 
(__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex ima_extend_list_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond316 irq_context: 0 (wq_completion)bond316 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond316 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond316 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond316 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond316 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond316 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond316 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond316 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex pcpu_alloc_mutex rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex pcpu_alloc_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond262#2 &rq->__lock irq_context: 0 (wq_completion)bond262#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond302#2 irq_context: 0 (wq_completion)bond302#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond302#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond302#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond302#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond302#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond302#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond302#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond302#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#6 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond311 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond302#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond302#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond302#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond302#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond302#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond302#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 
(wq_completion)bond302#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond302#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond302#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond302#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond302#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond302#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond302#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond289 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond289 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond289 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond289 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond289 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond289 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond289 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond289 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond289 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond289 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond289 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond289 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond289 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond289 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond289 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond289 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond289 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond289 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond289 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond289 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond289 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond289 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond289 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond289 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond289 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond277#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond317 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond317 irq_context: 0 (wq_completion)bond317 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond317 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond317 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond317 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond317 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond317 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond317 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &vma->vm_lock->lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond314 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond314 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond303#2 irq_context: 0 (wq_completion)bond303#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond303#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond303#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond303#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond303#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond303#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond303#2 
(work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond303#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond303#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond303#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond311 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond311 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond311 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond311 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond311 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond306 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: 0 (wq_completion)bond228#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock nl_table_lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock nl_table_wait.lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock lock#8 irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock id_table_lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &dir->lock#2 irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock krc.lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &c->lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->list_lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond228#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond318 irq_context: 0 (wq_completion)bond318 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond318 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond318 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond318 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond318 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond318 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond318 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond318 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] rcu_node_0 irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond24#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond304#2 irq_context: 0 (wq_completion)bond304#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond304#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond304#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond304#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond304#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond304#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond304#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond304#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond304#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond304#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond314 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond314 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond314 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond314 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond314 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond314 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond314 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond314 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond314 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond314 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond314 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond314 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond314 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond314 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond314 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond314 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond314 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond314 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond314 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond314 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond314 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond314 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond314 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond314 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond318 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond318 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond318 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond318 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond318 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond318 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond318 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond318 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond318 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond318 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond318 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond318 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond318 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond318 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond318 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond318 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond240#2 &rq->__lock irq_context: 0 (wq_completion)bond240#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock pool_lock#2 irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex 
rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond303#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond303#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond312 &rq->__lock irq_context: 0 (wq_completion)bond312 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond253#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond307 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond307 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond307 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond307 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond307 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond307 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond307 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond307 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond307 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond307 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond307 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond307 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond307 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond307 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond307 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond307 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond307 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 
(wq_completion)bond307 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond307 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond307 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond307 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond307 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond307 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond307 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond307 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond307 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond307 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond307 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond307 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond195 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond195 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond222#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond142#4 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond142#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 
(wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond259#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond305 irq_context: 0 (wq_completion)bond305 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond305 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond305 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond305 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond305 
(work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond305 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond305 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond305 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond281 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond268 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond268 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond268 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond268 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond268 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond292#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond198 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond234#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond284 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond284 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond284 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond284 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond284 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 
(wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &rcu_state.gp_wq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond320 irq_context: 0 (wq_completion)bond320 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond320 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond320 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond320 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond320 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond320 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond320 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond320 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond320 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond320 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond320 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond320 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond320 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond317 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond317 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond317 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond317 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond317 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond317 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond317 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond317 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond317 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem 
irq_context: 0 (wq_completion)bond317 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond317 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond317 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond317 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond317 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond317 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond317 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond317 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond317 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond317 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond317 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond317 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond317 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond317 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond317 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond317 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond317 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond300#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond300#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond303#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond303#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond303#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond303#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond303#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond304#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond304#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond304#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond304#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond304#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond304#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond304#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond304#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond304#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond304#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond304#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond304#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 
0 (wq_completion)bond304#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond304#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond304#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond304#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond304#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond304#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond304#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond304#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond304#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond304#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond304#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond304#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond304#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond304#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond305 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond305 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 
irq_context: 0 (wq_completion)bond305 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond305 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond305 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond301#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond301#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond301#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond301#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond301#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond316 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond316 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond316 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond316 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond316 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond312 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond312 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 (wq_completion)bond306#2 irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &journal->j_checkpoint_mutex &fq->mq_flush_lock irq_context: 0 &journal->j_checkpoint_mutex &fq->mq_flush_lock &q->requeue_lock irq_context: 0 (wq_completion)rcu_gp &cfs_rq->removed.lock irq_context: 0 (wq_completion)rcu_gp &obj_hash[i].lock irq_context: 0 &journal->j_checkpoint_mutex &fq->mq_flush_lock &obj_hash[i].lock irq_context: 0 &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock irq_context: 0 &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond216#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond321 irq_context: 0 (wq_completion)bond321 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond321 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond321 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond321 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond321 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond321 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond321 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond321 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock 
irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond296#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond306#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond321 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond321 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond321 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 
(wq_completion)bond321 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond321 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond307#2 irq_context: 0 (wq_completion)bond307#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond307#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond307#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond307#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 
(wq_completion)bond260#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond307#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond307#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond307#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond307#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
&rq->__lock irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond260#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond322 irq_context: 0 (wq_completion)bond322 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond322 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond322 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond322 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond322 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond322 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond322 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond322 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond322 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond322 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond308#2 irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond308#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond308#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond308#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond308#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond308#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond308#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond308#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond308#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond316 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond316 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond323 irq_context: 0 (wq_completion)bond323 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond323 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond323 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond323 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond323 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond323 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond323 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond323 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond323 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond323 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond323 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond323 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond323 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond323 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond323 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond323 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond323 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond323 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond323 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond323 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond323 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond323 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond323 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond323 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond323 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond323 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond323 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond323 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond308#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond308#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond308#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond308#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond308#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock 
irq_context: 0 (wq_completion)bond322 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond322 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond322 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond322 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond322 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond322 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond322 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond322 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond322 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond322 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond322 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond322 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond322 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond322 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond322 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond322 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond322 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond322 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond322 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond322 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond322 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond322 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond322 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond322 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond322 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond307#2 (work_completion)(&(&slave->notify_work)->work) 
rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond307#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond307#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond307#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond307#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond249#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 
(wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock nl_table_lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock nl_table_wait.lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock lock#8 irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock id_table_lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &dir->lock#2 irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock krc.lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &c->lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->list_lock irq_context: 
0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock 
&base->lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond309#2 irq_context: 0 (wq_completion)bond309#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond309#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond309#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond309#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond309#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond309#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond309#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond309#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond239 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond239 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond324 irq_context: 0 (wq_completion)bond324 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond324 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond324 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond324 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond324 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 
(wq_completion)bond324 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond324 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond324 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond194#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond310#2 irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond325 irq_context: 0 (wq_completion)bond325 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond325 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond325 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond325 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond325 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond325 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond325 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond325 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond325 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond325 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond261 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond261 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond261 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond261 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 
(wq_completion)bond311#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond311#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond311#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond311#2 irq_context: 0 (wq_completion)bond311#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond311#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond311#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond311#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond311#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond237#2 &rq->__lock irq_context: 0 (wq_completion)bond324 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond324 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond324 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond324 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond324 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond324 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond324 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond324 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond324 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond324 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond324 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond324 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond324 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond324 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond324 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond324 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond324 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond324 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond324 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond324 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond324 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond324 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond324 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond324 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond324 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond325 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond325 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond325 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond325 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond325 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond325 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond325 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond325 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond325 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond325 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond325 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond325 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond325 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond325 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond325 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond325 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond309#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond309#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond309#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond309#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond309#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond315 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond315 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond315 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond315 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond315 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond315 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond315 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond315 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond315 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond318 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond318 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond318 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond318 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond318 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond311 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond311 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond326 irq_context: 0 (wq_completion)bond326 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond326 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond326 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond326 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond326 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond326 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond326 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond326 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond326 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond326 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond300 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond300 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond312#2 irq_context: 0 (wq_completion)bond312#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond312#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond312#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond312#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond312#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond312#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond312#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 
(wq_completion)bond312#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond327 irq_context: 0 (wq_completion)bond326 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond326 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond326 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond326 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond326 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond326 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond326 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond326 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond326 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond326 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond326 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond326 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond327 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond327 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond327 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond327 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond327 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond327 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond327 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond327 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond327 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond327 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond308#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond308#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond308#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond308#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond308#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond308#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond308#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond308#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock 
irq_context: 0 (wq_completion)bond308#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond308#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond308#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond308#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond308#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond308#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond308#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond308#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond308#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond308#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond308#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond308#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond308#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond308#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond308#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond308#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond308#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond308#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond308#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond327 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond327 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond327 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond327 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond327 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond299#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond248#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond170#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 
(wq_completion)bond170#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond313#2 irq_context: 0 (wq_completion)bond313#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond313#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond313#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond313#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond313#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond313#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond313#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond313#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond313#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond313#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &dentry->d_lock &lru->node[i].lock rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &dentry->d_lock &lru->node[i].lock rcu_read_lock &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &dentry->d_lock &lru->node[i].lock rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond313#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond313#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond313#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond313#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond313#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond313#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond313#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond313#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond313#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond313#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond313#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond313#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem 
&list->lock#2 irq_context: 0 (wq_completion)bond313#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond313#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond313#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond313#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond313#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond313#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond313#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond313#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond313#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond328 irq_context: 0 (wq_completion)bond328 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond328 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond328 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond328 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond280#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond328 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond328 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond328 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond328 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond328 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond328 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond280#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond306 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond314#2 irq_context: 0 (wq_completion)bond314#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond314#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond314#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond314#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond314#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond314#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond314#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond314#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond314#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond314#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond329 irq_context: 0 (wq_completion)bond329 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond329 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond329 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond329 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 (wq_completion)bond329 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond329 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond329 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond329 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 pgd_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 key irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 pcpu_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 percpu_counters_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 pcpu_lock stock_lock irq_context: 0 (wq_completion)bond328 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond328 (work_completion)(&(&slave->notify_work)->work) 
rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond328 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond328 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond328 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond315#2 irq_context: 0 (wq_completion)bond315#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond315#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond315#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond315#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond315#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond315#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond315#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond315#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond330 irq_context: 0 (wq_completion)bond330 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond330 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond330 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond330 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond330 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond330 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond330 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond330 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond330 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond330 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond330 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond330 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond330 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond330 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond330 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond330 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond330 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond330 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond330 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond330 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond330 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond314#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond314#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond314#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond314#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond314#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond314#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond314#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond314#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond314#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond314#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond314#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond314#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond314#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond314#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond314#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond314#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond314#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond314#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond314#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond314#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond314#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond314#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond314#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond314#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond314#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond314#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond314#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond312#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond312#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond312#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond312#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond312#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond312#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond312#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond312#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond312#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond312#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 
(wq_completion)bond310#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond310#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond316#2 irq_context: 0 (wq_completion)bond316#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond316#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond316#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond316#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond316#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond316#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond316#2 
(work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond316#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 sb_internal quarantine_lock irq_context: 0 (wq_completion)bond331 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) kfence_freelist_lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &meta->lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh kfence_freelist_lock irq_context: 0 (wq_completion)bond331 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond331 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond331 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond331 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
fs_reclaim irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond274#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond262#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 
(wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond234#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond234#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond331 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond332 irq_context: 0 (wq_completion)bond332 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond332 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond332 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond332 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond332 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond332 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond332 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond332 (work_completion)(&(&bond->mcast_work)->work) 
&base->lock &obj_hash[i].lock irq_context: 0 &disk->open_mutex &lock->wait_lock irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond294#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond329 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond329 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond329 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond329 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond329 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond329 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond311#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond329 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond311#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond311#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond311#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond311#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond329 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond329 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond329 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond329 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond329 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond329 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond329 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond329 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond329 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond329 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond329 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond329 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond329 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond329 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond329 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond329 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond329 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond329 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond329 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond329 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond329 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond329 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond329 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond329 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond329 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond329 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond329 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond329 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond329 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond329 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond329 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond329 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond329 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond329 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond329 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond329 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond329 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond329 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond315#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond315#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond206 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond315#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond315#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond315#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond315#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond315#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond315#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond315#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond315#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond315#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond315#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond315#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond315#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond315#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond315#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond315#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond315#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond315#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond315#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond315#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond315#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond315#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond315#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond315#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond315#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond315#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond315#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond315#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond315#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond193#2 &rq->__lock irq_context: 0 (wq_completion)bond193#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond71#3 irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond318#2 irq_context: 0 (wq_completion)bond318#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond318#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond318#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond318#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond318#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond318#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond318#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond318#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond318#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond318#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond318#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond318#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond318#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond318#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond318#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 
(wq_completion)bond318#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond318#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond318#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond318#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond332 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond332 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond332 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond332 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond332 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond279#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond279#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond279#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond279#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond279#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#7 &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim 
mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond235#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond235#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond333 irq_context: 0 (wq_completion)bond333 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond333 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond333 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond333 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond295 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond333 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond333 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond333 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond333 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond271 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond319 irq_context: 0 (wq_completion)bond319 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond319 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond319 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond319 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond319 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond319 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond319 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond319 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond319 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond334 irq_context: 0 (wq_completion)bond334 
(work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond334 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond334 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond334 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond334 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond334 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond334 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond334 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond334 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond334 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
&ul->lock#2 irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond324 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond73#3 irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond73#3 
(work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond320#2 irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 
(wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond270 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 
(wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond331 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond335 irq_context: 0 (wq_completion)bond335 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond335 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond335 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond335 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond335 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond335 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond335 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond335 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond267#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond267#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond298#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond298#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond298#2 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond74#2 irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond321#2 irq_context: 0 (wq_completion)bond321#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond321#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond321#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond321#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond321#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond321#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond321#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond321#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond333 &rq->__lock irq_context: 0 (wq_completion)bond333 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond336 irq_context: 0 (wq_completion)bond336 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond336 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond336 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond336 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond336 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond336 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond336 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond336 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond336 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond336 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond333 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond333 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond333 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond333 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond333 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond333 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond333 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond333 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond333 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond333 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond333 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond333 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond333 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond333 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond333 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond333 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond333 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond333 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond333 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond333 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond333 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond333 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond333 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond333 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond74#2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond320#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond75#4 irq_context: 0 (wq_completion)bond75#4 &rq->__lock irq_context: 0 (wq_completion)bond75#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond75#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond75#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond75#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond75#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond75#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond75#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond75#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond75#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond75#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond75#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond322#2 irq_context: 0 (wq_completion)bond322#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond322#2 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond322#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond322#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond322#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond322#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond322#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond322#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond322#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond337 irq_context: 0 (wq_completion)bond337 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond337 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond337 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond337 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond337 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond337 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond337 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond337 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#4 irq_context: 0 (wq_completion)bond76#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond76#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond76#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond229 &rq->__lock irq_context: 0 (wq_completion)bond229 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond296 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond76#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond76#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond76#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond323#2 irq_context: 0 (wq_completion)bond323#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond323#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond323#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond323#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond323#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond323#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond323#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond323#2 
(work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond338 irq_context: 0 (wq_completion)bond338 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond338 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond338 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond338 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond338 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond338 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond338 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond338 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond338 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond338 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond338 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond337 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond337 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond337 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond337 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond337 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond336 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond336 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond336 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond336 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond336 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond336 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond336 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond336 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond336 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond336 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex quarantine_lock irq_context: 0 (wq_completion)bond336 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond336 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond336 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond336 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond336 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond336 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond336 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond336 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu irq_context: 0 (wq_completion)bond336 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond336 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond336 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond336 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond336 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond336 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond336 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read